Ruby 3.2.3p157 (2024-01-18 revision 52bb2ac0a6971d0391efa2275f7a66bff319087c)
thread.c
1/**********************************************************************
2
3 thread.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11/*
12 YARV Thread Design
13
14 model 1: Userlevel Thread
15 Same as traditional ruby thread.
16
17 model 2: Native Thread with Global VM lock
18 Using pthread (or Windows thread) and Ruby threads run concurrent.
19
20 model 3: Native Thread with fine grain lock
21 Using pthread and Ruby threads run concurrent or parallel.
22
23 model 4: M:N User:Native threads with Global VM lock
24 Combination of model 1 and 2
25
26 model 5: M:N User:Native thread with fine grain lock
27 Combination of model 1 and 3
28
29------------------------------------------------------------------------
30
31 model 2:
32 A thread has mutex (GVL: Global VM Lock or Giant VM Lock) can run.
33 When thread scheduling, running thread release GVL. If running thread
34 try blocking operation, this thread must release GVL and another
35 thread can continue this flow. After blocking operation, thread
36 must check interrupt (RUBY_VM_CHECK_INTS).
37
38 Every VM can run parallel.
39
40 Ruby threads are scheduled by OS thread scheduler.
41
42------------------------------------------------------------------------
43
44 model 3:
45 Every threads run concurrent or parallel and to access shared object
46 exclusive access control is needed. For example, to access String
47 object or Array object, fine grain lock must be locked every time.
48 */
49
50
51/*
52 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
53 * 2.15 or later and set _FORTIFY_SOURCE > 0.
54 * However, the implementation is wrong. Even though Linux's select(2)
55 * supports large fd size (>FD_SETSIZE), it wrongly assumes fd is always
56 * less than FD_SETSIZE (i.e. 1024). And then when enabling HAVE_RB_FD_INIT,
57 * it doesn't work correctly and makes program abort. Therefore we need to
58 * disable FORTIFY_SOURCE until glibc fixes it.
59 */
60#undef _FORTIFY_SOURCE
61#undef __USE_FORTIFY_LEVEL
62#define __USE_FORTIFY_LEVEL 0
63
64/* for model 2 */
65
66#include "ruby/internal/config.h"
67
68#ifdef __linux__
69// Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
70# include <alloca.h>
71#endif
72
73#define TH_SCHED(th) (&(th)->ractor->threads.sched)
74
75#include "eval_intern.h"
76#include "gc.h"
77#include "hrtime.h"
78#include "internal.h"
79#include "internal/class.h"
80#include "internal/cont.h"
81#include "internal/error.h"
82#include "internal/hash.h"
83#include "internal/io.h"
84#include "internal/object.h"
85#include "internal/proc.h"
87#include "internal/signal.h"
88#include "internal/thread.h"
89#include "internal/time.h"
90#include "internal/warnings.h"
91#include "iseq.h"
92#include "mjit.h"
93#include "ruby/debug.h"
94#include "ruby/io.h"
95#include "ruby/thread.h"
96#include "ruby/thread_native.h"
97#include "timev.h"
98#include "vm_core.h"
99#include "ractor_core.h"
100#include "vm_debug.h"
101#include "vm_sync.h"
102
103#if USE_MJIT && defined(HAVE_SYS_WAIT_H)
104#include <sys/wait.h>
105#endif
106
107#ifndef USE_NATIVE_THREAD_PRIORITY
108#define USE_NATIVE_THREAD_PRIORITY 0
109#define RUBY_THREAD_PRIORITY_MAX 3
110#define RUBY_THREAD_PRIORITY_MIN -3
111#endif
112
113static VALUE rb_cThreadShield;
114
115static VALUE sym_immediate;
116static VALUE sym_on_blocking;
117static VALUE sym_never;
118
119enum SLEEP_FLAGS {
120 SLEEP_DEADLOCKABLE = 0x1,
121 SLEEP_SPURIOUS_CHECK = 0x2
122};
123
124#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
125#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
126
127static inline VALUE
128rb_thread_local_storage(VALUE thread)
129{
130 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
131 rb_ivar_set(thread, idLocals, rb_hash_new());
132 RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
133 }
134 return rb_ivar_get(thread, idLocals);
135}
136
137static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
138static void sleep_forever(rb_thread_t *th, unsigned int fl);
139static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
140static int rb_threadptr_dead(rb_thread_t *th);
141static void rb_check_deadlock(rb_ractor_t *r);
142static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
143static const char *thread_status_name(rb_thread_t *th, int detail);
144static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
145NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
146static int consume_communication_pipe(int fd);
147static int check_signals_nogvl(rb_thread_t *, int sigwait_fd);
148void rb_sigwait_fd_migrate(rb_vm_t *); /* process.c */
149
150#define eKillSignal INT2FIX(0)
151#define eTerminateSignal INT2FIX(1)
152static volatile int system_working = 1;
153
155 struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
156 rb_thread_t *th;
157 int fd;
158};
159
160/********************************************************************************/
161
162#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
163
165 enum rb_thread_status prev_status;
166};
167
168static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
169static void unblock_function_clear(rb_thread_t *th);
170
171static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
172 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
173static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
174
175#define THREAD_BLOCKING_BEGIN(th) do { \
176 struct rb_thread_sched * const sched = TH_SCHED(th); \
177 RB_GC_SAVE_MACHINE_CONTEXT(th); \
178 thread_sched_to_waiting(sched);
179
180#define THREAD_BLOCKING_END(th) \
181 thread_sched_to_running(sched, th); \
182 rb_ractor_thread_switch(th->ractor, th); \
183} while(0)
184
185#ifdef __GNUC__
186#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
187#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
188#else
189#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
190#endif
191#else
192#define only_if_constant(expr, notconst) notconst
193#endif
194#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
195 struct rb_blocking_region_buffer __region; \
196 if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
197 /* always return true unless fail_if_interrupted */ \
198 !only_if_constant(fail_if_interrupted, TRUE)) { \
199 exec; \
200 blocking_region_end(th, &__region); \
201 }; \
202} while(0)
203
204/*
205 * returns true if this thread was spuriously interrupted, false otherwise
206 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
207 */
208#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
209static inline int
210vm_check_ints_blocking(rb_execution_context_t *ec)
211{
212 rb_thread_t *th = rb_ec_thread_ptr(ec);
213
214 if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
215 if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
216 }
217 else {
218 th->pending_interrupt_queue_checked = 0;
219 RUBY_VM_SET_INTERRUPT(ec);
220 }
221 return rb_threadptr_execute_interrupts(th, 1);
222}
223
224int
225rb_vm_check_ints_blocking(rb_execution_context_t *ec)
226{
227 return vm_check_ints_blocking(ec);
228}
229
230/*
231 * poll() is supported by many OSes, but so far Linux is the only
232 * one we know of that supports using poll() in all places select()
233 * would work.
234 */
235#if defined(HAVE_POLL)
236# if defined(__linux__)
237# define USE_POLL
238# endif
239# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
240# define USE_POLL
241 /* FreeBSD does not set POLLOUT when POLLHUP happens */
242# define POLLERR_SET (POLLHUP | POLLERR)
243# endif
244#endif
245
246static void
247timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
248 const struct timeval *timeout)
249{
250 if (timeout) {
251 *rel = rb_timeval2hrtime(timeout);
252 *end = rb_hrtime_add(rb_hrtime_now(), *rel);
253 *to = rel;
254 }
255 else {
256 *to = 0;
257 }
258}
259
260MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
261void ruby_sigchld_handler(rb_vm_t *); /* signal.c */
262
263static void
264ubf_sigwait(void *ignore)
265{
266 rb_thread_wakeup_timer_thread(0);
267}
268
269#include THREAD_IMPL_SRC
270
271/*
272 * TODO: somebody with win32 knowledge should be able to get rid of
273 * timer-thread by busy-waiting on signals. And it should be possible
274 * to make the GVL in thread_pthread.c be platform-independent.
275 */
276#ifndef BUSY_WAIT_SIGNALS
277# define BUSY_WAIT_SIGNALS (0)
278#endif
279
280#ifndef USE_EVENTFD
281# define USE_EVENTFD (0)
282#endif
283
284#include "thread_sync.c"
285
286void
287rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
288{
290}
291
292void
293rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
294{
296}
297
298void
299rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
300{
302}
303
304void
305rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
306{
308}
309
310static int
311unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
312{
313 do {
314 if (fail_if_interrupted) {
315 if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
316 return FALSE;
317 }
318 }
319 else {
320 RUBY_VM_CHECK_INTS(th->ec);
321 }
322
323 rb_native_mutex_lock(&th->interrupt_lock);
324 } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
325 (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));
326
327 VM_ASSERT(th->unblock.func == NULL);
328
329 th->unblock.func = func;
330 th->unblock.arg = arg;
331 rb_native_mutex_unlock(&th->interrupt_lock);
332
333 return TRUE;
334}
335
336static void
337unblock_function_clear(rb_thread_t *th)
338{
339 rb_native_mutex_lock(&th->interrupt_lock);
340 th->unblock.func = 0;
341 rb_native_mutex_unlock(&th->interrupt_lock);
342}
343
344static void
345rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
346{
347 rb_native_mutex_lock(&th->interrupt_lock);
348
349 if (trap) {
350 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
351 }
352 else {
353 RUBY_VM_SET_INTERRUPT(th->ec);
354 }
355 if (th->unblock.func != NULL) {
356 (th->unblock.func)(th->unblock.arg);
357 }
358 else {
359 /* none */
360 }
361 rb_native_mutex_unlock(&th->interrupt_lock);
362}
363
364void
365rb_threadptr_interrupt(rb_thread_t *th)
366{
367 rb_threadptr_interrupt_common(th, 0);
368}
369
370static void
371threadptr_trap_interrupt(rb_thread_t *th)
372{
373 rb_threadptr_interrupt_common(th, 1);
374}
375
376static void
377terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
378{
379 rb_thread_t *th = 0;
380
381 ccan_list_for_each(&r->threads.set, th, lt_node) {
382 if (th != main_thread) {
383 RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
384
385 rb_threadptr_pending_interrupt_enque(th, eTerminateSignal);
386 rb_threadptr_interrupt(th);
387
388 RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
389 }
390 else {
391 RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
392 }
393 }
394}
395
396static void
397rb_threadptr_join_list_wakeup(rb_thread_t *thread)
398{
399 while (thread->join_list) {
400 struct rb_waiting_list *join_list = thread->join_list;
401
402 // Consume the entry from the join list:
403 thread->join_list = join_list->next;
404
405 rb_thread_t *target_thread = join_list->thread;
406
407 if (target_thread->scheduler != Qnil && join_list->fiber) {
408 rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
409 }
410 else {
411 rb_threadptr_interrupt(target_thread);
412
413 switch (target_thread->status) {
414 case THREAD_STOPPED:
415 case THREAD_STOPPED_FOREVER:
416 target_thread->status = THREAD_RUNNABLE;
417 default:
418 break;
419 }
420 }
421 }
422}
423
424void
425rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
426{
427 while (th->keeping_mutexes) {
428 rb_mutex_t *mutex = th->keeping_mutexes;
429 th->keeping_mutexes = mutex->next_mutex;
430
431 /* rb_warn("mutex #<%p> remains to be locked by terminated thread", (void *)mutexes); */
432
433 const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
434 if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
435 }
436}
437
438void
439rb_thread_terminate_all(rb_thread_t *th)
440{
441 rb_ractor_t *cr = th->ractor;
442 rb_execution_context_t * volatile ec = th->ec;
443 volatile int sleeping = 0;
444
445 if (cr->threads.main != th) {
446 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
447 (void *)cr->threads.main, (void *)th);
448 }
449
450 /* unlock all locking mutexes */
451 rb_threadptr_unlock_all_locking_mutexes(th);
452
453 EC_PUSH_TAG(ec);
454 if (EC_EXEC_TAG() == TAG_NONE) {
455 retry:
456 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
457
458 terminate_all(cr, th);
459
460 while (rb_ractor_living_thread_num(cr) > 1) {
461 rb_hrtime_t rel = RB_HRTIME_PER_SEC;
462 /*q
463 * Thread exiting routine in thread_start_func_2 notify
464 * me when the last sub-thread exit.
465 */
466 sleeping = 1;
467 native_sleep(th, &rel);
468 RUBY_VM_CHECK_INTS_BLOCKING(ec);
469 sleeping = 0;
470 }
471 }
472 else {
473 /*
474 * When caught an exception (e.g. Ctrl+C), let's broadcast
475 * kill request again to ensure killing all threads even
476 * if they are blocked on sleep, mutex, etc.
477 */
478 if (sleeping) {
479 sleeping = 0;
480 goto retry;
481 }
482 }
483 EC_POP_TAG();
484}
485
486void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
487
488static void
489thread_cleanup_func_before_exec(void *th_ptr)
490{
491 rb_thread_t *th = th_ptr;
492 th->status = THREAD_KILLED;
493
494 // The thread stack doesn't exist in the forked process:
495 th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
496
497 rb_threadptr_root_fiber_terminate(th);
498}
499
500static void
501thread_cleanup_func(void *th_ptr, int atfork)
502{
503 rb_thread_t *th = th_ptr;
504
505 th->locking_mutex = Qfalse;
506 thread_cleanup_func_before_exec(th_ptr);
507
508 /*
509 * Unfortunately, we can't release native threading resource at fork
510 * because libc may have unstable locking state therefore touching
511 * a threading resource may cause a deadlock.
512 *
513 * FIXME: Skipping native_mutex_destroy(pthread_mutex_destroy) is safe
514 * with NPTL, but native_thread_destroy calls pthread_cond_destroy
515 * which calls free(3), so there is a small memory leak atfork, here.
516 */
517 if (atfork)
518 return;
519
520 rb_native_mutex_destroy(&th->interrupt_lock);
521 native_thread_destroy(th);
522}
523
524static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
525static VALUE rb_thread_to_s(VALUE thread);
526
527void
528ruby_thread_init_stack(rb_thread_t *th)
529{
530 native_thread_init_stack(th);
531}
532
533const VALUE *
534rb_vm_proc_local_ep(VALUE proc)
535{
536 const VALUE *ep = vm_proc_ep(proc);
537
538 if (ep) {
539 return rb_vm_ep_local_ep(ep);
540 }
541 else {
542 return NULL;
543 }
544}
545
546// for ractor, defined in vm.c
547VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
548 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
549
550static VALUE
551thread_do_start_proc(rb_thread_t *th)
552{
553 VALUE args = th->invoke_arg.proc.args;
554 const VALUE *args_ptr;
555 int args_len;
556 VALUE procval = th->invoke_arg.proc.proc;
557 rb_proc_t *proc;
558 GetProcPtr(procval, proc);
559
560 th->ec->errinfo = Qnil;
561 th->ec->root_lep = rb_vm_proc_local_ep(procval);
562 th->ec->root_svar = Qfalse;
563
564 vm_check_ints_blocking(th->ec);
565
566 if (th->invoke_type == thread_invoke_type_ractor_proc) {
567 VALUE self = rb_ractor_self(th->ractor);
568 VM_ASSERT(FIXNUM_P(args));
569 args_len = FIX2INT(args);
570 args_ptr = ALLOCA_N(VALUE, args_len);
571 rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
572 vm_check_ints_blocking(th->ec);
573
574 return rb_vm_invoke_proc_with_self(
575 th->ec, proc, self,
576 args_len, args_ptr,
577 th->invoke_arg.proc.kw_splat,
578 VM_BLOCK_HANDLER_NONE
579 );
580 }
581 else {
582 args_len = RARRAY_LENINT(args);
583 if (args_len < 8) {
584 /* free proc.args if the length is enough small */
585 args_ptr = ALLOCA_N(VALUE, args_len);
586 MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR_TRANSIENT(args), VALUE, args_len);
587 th->invoke_arg.proc.args = Qnil;
588 }
589 else {
590 args_ptr = RARRAY_CONST_PTR(args);
591 }
592
593 vm_check_ints_blocking(th->ec);
594
595 return rb_vm_invoke_proc(
596 th->ec, proc,
597 args_len, args_ptr,
598 th->invoke_arg.proc.kw_splat,
599 VM_BLOCK_HANDLER_NONE
600 );
601 }
602}
603
604static void
605thread_do_start(rb_thread_t *th)
606{
607 native_set_thread_name(th);
608 VALUE result = Qundef;
609
610 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
611
612 switch (th->invoke_type) {
613 case thread_invoke_type_proc:
614 result = thread_do_start_proc(th);
615 break;
616
617 case thread_invoke_type_ractor_proc:
618 result = thread_do_start_proc(th);
619 rb_ractor_atexit(th->ec, result);
620 break;
621
622 case thread_invoke_type_func:
623 result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
624 break;
625
626 case thread_invoke_type_none:
627 rb_bug("unreachable");
628 }
629
631
632 th->value = result;
633
634 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
635}
636
637void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
638#define thread_sched_to_dead thread_sched_to_waiting
639
640static int
641thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
642{
643 STACK_GROW_DIR_DETECTION;
644 enum ruby_tag_type state;
645 VALUE errinfo = Qnil;
646 size_t size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
647 rb_thread_t *ractor_main_th = th->ractor->threads.main;
648 VALUE * vm_stack = NULL;
649
650 VM_ASSERT(th != th->vm->ractor.main_thread);
651 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
652
653 // setup native thread
654 thread_sched_to_running(TH_SCHED(th), th);
655 ruby_thread_set_native(th);
656
657 RUBY_DEBUG_LOG("got lock. th:%u", rb_th_serial(th));
658
659 // setup ractor
660 if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
661 RB_VM_LOCK();
662 {
663 rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
664 rb_ractor_t *r = th->ractor;
665 r->r_stdin = rb_io_prep_stdin();
666 r->r_stdout = rb_io_prep_stdout();
667 r->r_stderr = rb_io_prep_stderr();
668 }
669 RB_VM_UNLOCK();
670 }
671
672 // This assertion is not passed on win32 env. Check it later.
673 // VM_ASSERT((size * sizeof(VALUE)) <= th->ec->machine.stack_maxsize);
674
675 // setup VM and machine stack
676 vm_stack = alloca(size * sizeof(VALUE));
677 VM_ASSERT(vm_stack);
678
679 rb_ec_initialize_vm_stack(th->ec, vm_stack, size);
680 th->ec->machine.stack_start = STACK_DIR_UPPER(vm_stack + size, vm_stack);
681 th->ec->machine.stack_maxsize -= size * sizeof(VALUE);
682
683 // Ensure that we are not joinable.
684 VM_ASSERT(UNDEF_P(th->value));
685
686 EC_PUSH_TAG(th->ec);
687
688 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
689 SAVE_ROOT_JMPBUF(th, thread_do_start(th));
690 }
691 else {
692 errinfo = th->ec->errinfo;
693
694 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
695 if (!NIL_P(exc)) errinfo = exc;
696
697 if (state == TAG_FATAL) {
698 if (th->invoke_type == thread_invoke_type_ractor_proc) {
699 rb_ractor_atexit(th->ec, Qnil);
700 }
701 /* fatal error within this thread, need to stop whole script */
702 }
703 else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
704 /* exit on main_thread. */
705 }
706 else {
707 if (th->report_on_exception) {
708 VALUE mesg = rb_thread_to_s(th->self);
709 rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
710 rb_write_error_str(mesg);
711 rb_ec_error_print(th->ec, errinfo);
712 }
713
714 if (th->invoke_type == thread_invoke_type_ractor_proc) {
715 rb_ractor_atexit_exception(th->ec);
716 }
717
718 if (th->vm->thread_abort_on_exception ||
719 th->abort_on_exception || RTEST(ruby_debug)) {
720 /* exit on main_thread */
721 }
722 else {
723 errinfo = Qnil;
724 }
725 }
726 th->value = Qnil;
727 }
728
729 // The thread is effectively finished and can be joined.
730 VM_ASSERT(!UNDEF_P(th->value));
731
732 rb_threadptr_join_list_wakeup(th);
733 rb_threadptr_unlock_all_locking_mutexes(th);
734
735 if (th->invoke_type == thread_invoke_type_ractor_proc) {
736 rb_thread_terminate_all(th);
737 rb_ractor_teardown(th->ec);
738 }
739
740 th->status = THREAD_KILLED;
741 RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));
742
743 if (th->vm->ractor.main_thread == th) {
744 ruby_stop(0);
745 }
746
747 if (RB_TYPE_P(errinfo, T_OBJECT)) {
748 /* treat with normal error object */
749 rb_threadptr_raise(ractor_main_th, 1, &errinfo);
750 }
751
752 EC_POP_TAG();
753
754 rb_ec_clear_current_thread_trace_func(th->ec);
755
756 /* locking_mutex must be Qfalse */
757 if (th->locking_mutex != Qfalse) {
758 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
759 (void *)th, th->locking_mutex);
760 }
761
762 if (ractor_main_th->status == THREAD_KILLED &&
763 th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
764 /* I'm last thread. wake up main thread from rb_thread_terminate_all */
765 rb_threadptr_interrupt(ractor_main_th);
766 }
767
768 rb_check_deadlock(th->ractor);
769
770 rb_fiber_close(th->ec->fiber_ptr);
771
772 thread_cleanup_func(th, FALSE);
773 VM_ASSERT(th->ec->vm_stack == NULL);
774
775 if (th->invoke_type == thread_invoke_type_ractor_proc) {
776 // after rb_ractor_living_threads_remove()
777 // GC will happen anytime and this ractor can be collected (and destroy GVL).
778 // So gvl_release() should be before it.
779 thread_sched_to_dead(TH_SCHED(th));
780 rb_ractor_living_threads_remove(th->ractor, th);
781 }
782 else {
783 rb_ractor_living_threads_remove(th->ractor, th);
784 thread_sched_to_dead(TH_SCHED(th));
785 }
786
787 return 0;
788}
789
791 enum thread_invoke_type type;
792
793 // for normal proc thread
794 VALUE args;
795 VALUE proc;
796
797 // for ractor
798 rb_ractor_t *g;
799
800 // for func
801 VALUE (*fn)(void *);
802};
803
804static VALUE
805thread_create_core(VALUE thval, struct thread_create_params *params)
806{
807 rb_execution_context_t *ec = GET_EC();
808 rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
809 int err;
810
811 if (OBJ_FROZEN(current_th->thgroup)) {
813 "can't start a new thread (frozen ThreadGroup)");
814 }
815
816 rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);
817
818 switch (params->type) {
819 case thread_invoke_type_proc:
820 th->invoke_type = thread_invoke_type_proc;
821 th->invoke_arg.proc.args = params->args;
822 th->invoke_arg.proc.proc = params->proc;
823 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
824 break;
825
826 case thread_invoke_type_ractor_proc:
827#if RACTOR_CHECK_MODE > 0
828 rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
829#endif
830 th->invoke_type = thread_invoke_type_ractor_proc;
831 th->ractor = params->g;
832 th->ractor->threads.main = th;
833 th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
834 th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
835 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
836 rb_ractor_send_parameters(ec, params->g, params->args);
837 break;
838
839 case thread_invoke_type_func:
840 th->invoke_type = thread_invoke_type_func;
841 th->invoke_arg.func.func = params->fn;
842 th->invoke_arg.func.arg = (void *)params->args;
843 break;
844
845 default:
846 rb_bug("unreachable");
847 }
848
849 th->priority = current_th->priority;
850 th->thgroup = current_th->thgroup;
851
852 th->pending_interrupt_queue = rb_ary_hidden_new(0);
853 th->pending_interrupt_queue_checked = 0;
854 th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
855 RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);
856
857 rb_native_mutex_initialize(&th->interrupt_lock);
858
859 RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));
860
861 rb_ractor_living_threads_insert(th->ractor, th);
862
863 /* kick thread */
864 err = native_thread_create(th);
865 if (err) {
866 th->status = THREAD_KILLED;
867 rb_ractor_living_threads_remove(th->ractor, th);
868 rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
869 }
870 return thval;
871}
872
873#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
874
875/*
876 * call-seq:
877 * Thread.new { ... } -> thread
878 * Thread.new(*args, &proc) -> thread
879 * Thread.new(*args) { |args| ... } -> thread
880 *
881 * Creates a new thread executing the given block.
882 *
883 * Any +args+ given to ::new will be passed to the block:
884 *
885 * arr = []
886 * a, b, c = 1, 2, 3
887 * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
888 * arr #=> [1, 2, 3]
889 *
890 * A ThreadError exception is raised if ::new is called without a block.
891 *
892 * If you're going to subclass Thread, be sure to call super in your
893 * +initialize+ method, otherwise a ThreadError will be raised.
894 */
895static VALUE
896thread_s_new(int argc, VALUE *argv, VALUE klass)
897{
898 rb_thread_t *th;
899 VALUE thread = rb_thread_alloc(klass);
900
901 if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
902 rb_raise(rb_eThreadError, "can't alloc thread");
903 }
904
905 rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
906 th = rb_thread_ptr(thread);
907 if (!threadptr_initialized(th)) {
908 rb_raise(rb_eThreadError, "uninitialized thread - check `%"PRIsVALUE"#initialize'",
909 klass);
910 }
911 return thread;
912}
913
914/*
915 * call-seq:
916 * Thread.start([args]*) {|args| block } -> thread
917 * Thread.fork([args]*) {|args| block } -> thread
918 *
919 * Basically the same as ::new. However, if class Thread is subclassed, then
920 * calling +start+ in that subclass will not invoke the subclass's
921 * +initialize+ method.
922 */
923
924static VALUE
925thread_start(VALUE klass, VALUE args)
926{
927 struct thread_create_params params = {
928 .type = thread_invoke_type_proc,
929 .args = args,
930 .proc = rb_block_proc(),
931 };
932 return thread_create_core(rb_thread_alloc(klass), &params);
933}
934
935static VALUE
936threadptr_invoke_proc_location(rb_thread_t *th)
937{
938 if (th->invoke_type == thread_invoke_type_proc) {
939 return rb_proc_location(th->invoke_arg.proc.proc);
940 }
941 else {
942 return Qnil;
943 }
944}
945
946/* :nodoc: */
947static VALUE
948thread_initialize(VALUE thread, VALUE args)
949{
950 rb_thread_t *th = rb_thread_ptr(thread);
951
952 if (!rb_block_given_p()) {
953 rb_raise(rb_eThreadError, "must be called with a block");
954 }
955 else if (th->invoke_type != thread_invoke_type_none) {
956 VALUE loc = threadptr_invoke_proc_location(th);
957 if (!NIL_P(loc)) {
959 "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
960 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
961 }
962 else {
963 rb_raise(rb_eThreadError, "already initialized thread");
964 }
965 }
966 else {
967 struct thread_create_params params = {
968 .type = thread_invoke_type_proc,
969 .args = args,
970 .proc = rb_block_proc(),
971 };
972 return thread_create_core(thread, &params);
973 }
974}
975
976VALUE
977rb_thread_create(VALUE (*fn)(void *), void *arg)
978{
979 struct thread_create_params params = {
980 .type = thread_invoke_type_func,
981 .fn = fn,
982 .args = (VALUE)arg,
983 };
984 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
985}
986
987VALUE
988rb_thread_create_ractor(rb_ractor_t *g, VALUE args, VALUE proc)
989{
990 struct thread_create_params params = {
991 .type = thread_invoke_type_ractor_proc,
992 .g = g,
993 .args = args,
994 .proc = proc,
995 };
996 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
997}
998
999
1000struct join_arg {
1001 struct rb_waiting_list *waiter;
1002 rb_thread_t *target;
1003 VALUE timeout;
1004 rb_hrtime_t *limit;
1005};
1006
1007static VALUE
1008remove_from_join_list(VALUE arg)
1009{
1010 struct join_arg *p = (struct join_arg *)arg;
1011 rb_thread_t *target_thread = p->target;
1012
1013 if (target_thread->status != THREAD_KILLED) {
1014 struct rb_waiting_list **join_list = &target_thread->join_list;
1015
1016 while (*join_list) {
1017 if (*join_list == p->waiter) {
1018 *join_list = (*join_list)->next;
1019 break;
1020 }
1021
1022 join_list = &(*join_list)->next;
1023 }
1024 }
1025
1026 return Qnil;
1027}
1028
1029static int
1030thread_finished(rb_thread_t *th)
1031{
1032 return th->status == THREAD_KILLED || !UNDEF_P(th->value);
1033}
1034
1035static VALUE
1036thread_join_sleep(VALUE arg)
1037{
1038 struct join_arg *p = (struct join_arg *)arg;
1039 rb_thread_t *target_th = p->target, *th = p->waiter->thread;
1040 rb_hrtime_t end = 0, *limit = p->limit;
1041
1042 if (limit) {
1043 end = rb_hrtime_add(*limit, rb_hrtime_now());
1044 }
1045
1046 while (!thread_finished(target_th)) {
1047 VALUE scheduler = rb_fiber_scheduler_current();
1048
1049 if (scheduler != Qnil) {
1050 rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
1051 // Check if the target thread is finished after blocking:
1052 if (thread_finished(target_th)) break;
1053 // Otherwise, a timeout occurred:
1054 else return Qfalse;
1055 }
1056 else if (!limit) {
1057 th->status = THREAD_STOPPED_FOREVER;
1058 rb_ractor_sleeper_threads_inc(th->ractor);
1059 rb_check_deadlock(th->ractor);
1060 native_sleep(th, 0);
1061 rb_ractor_sleeper_threads_dec(th->ractor);
1062 }
1063 else {
1064 if (hrtime_update_expire(limit, end)) {
1065 RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
1066 return Qfalse;
1067 }
1068 th->status = THREAD_STOPPED;
1069 native_sleep(th, limit);
1070 }
1071 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1072 th->status = THREAD_RUNNABLE;
1073
1074 RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1075 }
1076
1077 return Qtrue;
1078}
1079
1080static VALUE
1081thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
1082{
1083 rb_execution_context_t *ec = GET_EC();
1084 rb_thread_t *th = ec->thread_ptr;
1085 rb_fiber_t *fiber = ec->fiber_ptr;
1086
1087 if (th == target_th) {
1088 rb_raise(rb_eThreadError, "Target thread must not be current thread");
1089 }
1090
1091 if (th->ractor->threads.main == target_th) {
1092 rb_raise(rb_eThreadError, "Target thread must not be main thread");
1093 }
1094
1095 RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1096
1097 if (target_th->status != THREAD_KILLED) {
1098 struct rb_waiting_list waiter;
1099 waiter.next = target_th->join_list;
1100 waiter.thread = th;
1101 waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
1102 target_th->join_list = &waiter;
1103
1104 struct join_arg arg;
1105 arg.waiter = &waiter;
1106 arg.target = target_th;
1107 arg.timeout = timeout;
1108 arg.limit = limit;
1109
1110 if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
1111 return Qnil;
1112 }
1113 }
1114
1115 RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1116
1117 if (target_th->ec->errinfo != Qnil) {
1118 VALUE err = target_th->ec->errinfo;
1119
1120 if (FIXNUM_P(err)) {
1121 switch (err) {
1122 case INT2FIX(TAG_FATAL):
1123 RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1124
1125 /* OK. killed. */
1126 break;
1127 default:
1128 rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
1129 }
1130 }
1131 else if (THROW_DATA_P(target_th->ec->errinfo)) {
1132 rb_bug("thread_join: THROW_DATA should not reach here.");
1133 }
1134 else {
1135 /* normal exception */
1136 rb_exc_raise(err);
1137 }
1138 }
1139 return target_th->self;
1140}
1141
1142/*
1143 * call-seq:
1144 * thr.join -> thr
1145 * thr.join(limit) -> thr
1146 *
1147 * The calling thread will suspend execution and run this +thr+.
1148 *
1149 * Does not return until +thr+ exits or until the given +limit+ seconds have
1150 * passed.
1151 *
1152 * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1153 * returned.
1154 *
1155 * Any threads not joined will be killed when the main program exits.
1156 *
1157 * If +thr+ had previously raised an exception and the ::abort_on_exception or
1158 * $DEBUG flags are not set, (so the exception has not yet been processed), it
1159 * will be processed at this time.
1160 *
1161 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1162 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1163 * x.join # Let thread x finish, thread a will be killed on exit.
1164 * #=> "axyz"
1165 *
1166 * The following example illustrates the +limit+ parameter.
1167 *
1168 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1169 * puts "Waiting" until y.join(0.15)
1170 *
1171 * This will produce:
1172 *
1173 * tick...
1174 * Waiting
1175 * tick...
1176 * Waiting
1177 * tick...
1178 * tick...
1179 */
1180
1181static VALUE
1182thread_join_m(int argc, VALUE *argv, VALUE self)
1183{
1184 VALUE timeout = Qnil;
1185 rb_hrtime_t rel = 0, *limit = 0;
1186
1187 if (rb_check_arity(argc, 0, 1)) {
1188 timeout = argv[0];
1189 }
1190
1191 // Convert the timeout eagerly, so it's always converted and deterministic
1192 /*
1193 * This supports INFINITY and negative values, so we can't use
1194 * rb_time_interval right now...
1195 */
1196 if (NIL_P(timeout)) {
1197 /* unlimited */
1198 }
1199 else if (FIXNUM_P(timeout)) {
1200 rel = rb_sec2hrtime(NUM2TIMET(timeout));
1201 limit = &rel;
1202 }
1203 else {
1204 limit = double2hrtime(&rel, rb_num2dbl(timeout));
1205 }
1206
1207 return thread_join(rb_thread_ptr(self), timeout, limit);
1208}
1209
1210/*
1211 * call-seq:
1212 * thr.value -> obj
1213 *
1214 * Waits for +thr+ to complete, using #join, and returns its value or raises
1215 * the exception which terminated the thread.
1216 *
1217 * a = Thread.new { 2 + 2 }
1218 * a.value #=> 4
1219 *
1220 * b = Thread.new { raise 'something went wrong' }
1221 * b.value #=> RuntimeError: something went wrong
1222 */
1223
1224static VALUE
1225thread_value(VALUE self)
1226{
1227 rb_thread_t *th = rb_thread_ptr(self);
1228 thread_join(th, Qnil, 0);
1229 if (UNDEF_P(th->value)) {
1230 // If the thread is dead because we forked th->value is still Qundef.
1231 return Qnil;
1232 }
1233 return th->value;
1234}
1235
1236/*
1237 * Thread Scheduling
1238 */
1239
1240static void
1241getclockofday(struct timespec *ts)
1242{
1243#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1244 if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
1245 return;
1246#endif
1247 rb_timespec_now(ts);
1248}
1249
1250/*
1251 * Don't inline this, since library call is already time consuming
1252 * and we don't want "struct timespec" on stack too long for GC
1253 */
1254NOINLINE(rb_hrtime_t rb_hrtime_now(void));
1255rb_hrtime_t
1256rb_hrtime_now(void)
1257{
1258 struct timespec ts;
1259
1260 getclockofday(&ts);
1261 return rb_timespec2hrtime(&ts);
1262}
1263
1264static void
1265sleep_forever(rb_thread_t *th, unsigned int fl)
1266{
1267 enum rb_thread_status prev_status = th->status;
1268 enum rb_thread_status status;
1269 int woke;
1270
1271 status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1272 th->status = status;
1273 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1274 while (th->status == status) {
1275 if (fl & SLEEP_DEADLOCKABLE) {
1276 rb_ractor_sleeper_threads_inc(th->ractor);
1277 rb_check_deadlock(th->ractor);
1278 }
1279 native_sleep(th, 0);
1280 if (fl & SLEEP_DEADLOCKABLE) {
1281 rb_ractor_sleeper_threads_dec(th->ractor);
1282 }
1283 woke = vm_check_ints_blocking(th->ec);
1284 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1285 break;
1286 }
1287 th->status = prev_status;
1288}
1289
1290/*
1291 * at least gcc 7.2 and 7.3 complains about "rb_hrtime_t end"
1292 * being uninitialized, maybe other versions, too.
1293 */
1294COMPILER_WARNING_PUSH
1295#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1296COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1297#endif
1298#ifndef PRIu64
1299#define PRIu64 PRI_64_PREFIX "u"
1300#endif
1301/*
1302 * @end is the absolute time when @ts is set to expire
1303 * Returns true if @end has past
1304 * Updates @ts and returns false otherwise
1305 */
1306static int
1307hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
1308{
1309 rb_hrtime_t now = rb_hrtime_now();
1310
1311 if (now > end) return 1;
1312
1313 RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);
1314
1315 *timeout = end - now;
1316 return 0;
1317}
1318COMPILER_WARNING_POP
1319
1320static int
1321sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1322{
1323 enum rb_thread_status prev_status = th->status;
1324 int woke;
1325 rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1326
1327 th->status = THREAD_STOPPED;
1328 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1329 while (th->status == THREAD_STOPPED) {
1330 native_sleep(th, &rel);
1331 woke = vm_check_ints_blocking(th->ec);
1332 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1333 break;
1334 if (hrtime_update_expire(&rel, end))
1335 break;
1336 woke = 1;
1337 }
1338 th->status = prev_status;
1339 return woke;
1340}
1341
1342static int
1343sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
1344{
1345 enum rb_thread_status prev_status = th->status;
1346 int woke;
1347 rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());
1348
1349 th->status = THREAD_STOPPED;
1350 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1351 while (th->status == THREAD_STOPPED) {
1352 native_sleep(th, &rel);
1353 woke = vm_check_ints_blocking(th->ec);
1354 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1355 break;
1356 if (hrtime_update_expire(&rel, end))
1357 break;
1358 woke = 1;
1359 }
1360 th->status = prev_status;
1361 return woke;
1362}
1363
1364void
1366{
1367 RUBY_DEBUG_LOG("");
1368 sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1369}
1370
1371void
1373{
1374 RUBY_DEBUG_LOG("");
1375 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
1376}
1377
1378void
1379rb_thread_sleep_interruptible(void)
1380{
1381 rb_thread_t *th = GET_THREAD();
1382 enum rb_thread_status prev_status = th->status;
1383
1384 th->status = THREAD_STOPPED;
1385 native_sleep(th, 0);
1386 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1387 th->status = prev_status;
1388}
1389
1390static void
1391rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
1392{
1393 VALUE scheduler = rb_fiber_scheduler_current();
1394 if (scheduler != Qnil) {
1395 rb_fiber_scheduler_block(scheduler, blocker, timeout);
1396 }
1397 else {
1398 RUBY_DEBUG_LOG("");
1399 if (end) {
1400 sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
1401 }
1402 else {
1403 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
1404 }
1405 }
1406}
1407
1408void
1410{
1411 rb_thread_t *th = GET_THREAD();
1412
1413 sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1414}
1415
1416/*
1417 * CAUTION: This function causes thread switching.
1418 * rb_thread_check_ints() check ruby's interrupts.
1419 * some interrupt needs thread switching/invoke handlers,
1420 * and so on.
1421 */
1422
1423void
1425{
1426 RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
1427}
1428
1429/*
1430 * Hidden API for tcl/tk wrapper.
1431 * There is no guarantee to perpetuate it.
1432 */
1433int
1434rb_thread_check_trap_pending(void)
1435{
1436 return rb_signal_buff_size() != 0;
1437}
1438
1439/* This function can be called in blocking region. */
1440int
1442{
1443 return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1444}
1445
1446void
1451
1452static void
1453rb_thread_schedule_limits(uint32_t limits_us)
1454{
1455 if (!rb_thread_alone()) {
1456 rb_thread_t *th = GET_THREAD();
1457 RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);
1458
1459 if (th->running_time_us >= limits_us) {
1460 RUBY_DEBUG_LOG("switch %s", "start");
1461
1462 RB_GC_SAVE_MACHINE_CONTEXT(th);
1463 thread_sched_yield(TH_SCHED(th), th);
1464 rb_ractor_thread_switch(th->ractor, th);
1465
1466 RUBY_DEBUG_LOG("switch %s", "done");
1467 }
1468 }
1469}
1470
1471void
1473{
1474 rb_thread_schedule_limits(0);
1475 RUBY_VM_CHECK_INTS(GET_EC());
1476}
1477
1478/* blocking region */
1479
1480static inline int
1481blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1482 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1483{
1484#ifdef RUBY_VM_CRITICAL_SECTION
1485 VM_ASSERT(ruby_assert_critical_section_entered == 0);
1486#endif
1487 VM_ASSERT(th == GET_THREAD());
1488
1489 region->prev_status = th->status;
1490 if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1491 th->blocking_region_buffer = region;
1492 th->status = THREAD_STOPPED;
1493 rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
1494
1495 RUBY_DEBUG_LOG("");
1496
1497 RB_GC_SAVE_MACHINE_CONTEXT(th);
1498 thread_sched_to_waiting(TH_SCHED(th));
1499 return TRUE;
1500 }
1501 else {
1502 return FALSE;
1503 }
1504}
1505
1506static inline void
1507blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1508{
1509 /* entry to ubf_list still permitted at this point, make it impossible: */
1510 unblock_function_clear(th);
1511 /* entry to ubf_list impossible at this point, so unregister is safe: */
1512 unregister_ubf_list(th);
1513
1514 thread_sched_to_running(TH_SCHED(th), th);
1515 rb_ractor_thread_switch(th->ractor, th);
1516
1517 th->blocking_region_buffer = 0;
1518 rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
1519 if (th->status == THREAD_STOPPED) {
1520 th->status = region->prev_status;
1521 }
1522
1523 RUBY_DEBUG_LOG("");
1524 VM_ASSERT(th == GET_THREAD());
1525}
1526
1527void *
1528rb_nogvl(void *(*func)(void *), void *data1,
1529 rb_unblock_function_t *ubf, void *data2,
1530 int flags)
1531{
1532 void *val = 0;
1533 rb_execution_context_t *ec = GET_EC();
1534 rb_thread_t *th = rb_ec_thread_ptr(ec);
1535 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1536 bool is_main_thread = vm->ractor.main_thread == th;
1537 int saved_errno = 0;
1538 VALUE ubf_th = Qfalse;
1539
1540 if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
1541 ubf = ubf_select;
1542 data2 = th;
1543 }
1544 else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1545 if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1546 vm->ubf_async_safe = 1;
1547 }
1548 else {
1549 ubf_th = rb_thread_start_unblock_thread();
1550 }
1551 }
1552
1553 BLOCKING_REGION(th, {
1554 val = func(data1);
1555 saved_errno = errno;
1556 }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1557
1558 if (is_main_thread) vm->ubf_async_safe = 0;
1559
1560 if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1561 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1562 }
1563
1564 if (ubf_th != Qfalse) {
1565 thread_value(rb_thread_kill(ubf_th));
1566 }
1567
1568 errno = saved_errno;
1569
1570 return val;
1571}
1572
1573/*
1574 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1575 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1576 * without interrupt process.
1577 *
1578 * rb_thread_call_without_gvl() does:
1579 * (1) Check interrupts.
1580 * (2) release GVL.
1581 * Other Ruby threads may run in parallel.
1582 * (3) call func with data1
1583 * (4) acquire GVL.
1584 * Other Ruby threads can not run in parallel any more.
1585 * (5) Check interrupts.
1586 *
1587 * rb_thread_call_without_gvl2() does:
1588 * (1) Check interrupt and return if interrupted.
1589 * (2) release GVL.
1590 * (3) call func with data1 and a pointer to the flags.
1591 * (4) acquire GVL.
1592 *
1593 * If another thread interrupts this thread (Thread#kill, signal delivery,
1594 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1595 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1596 * toggling a cancellation flag, canceling the invocation of a call inside
1597 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1598 *
1599 * There are built-in ubfs and you can specify these ubfs:
1600 *
1601 * * RUBY_UBF_IO: ubf for IO operation
1602 * * RUBY_UBF_PROCESS: ubf for process operation
1603 *
1604 * However, we can not guarantee our built-in ubfs interrupt your `func()'
1605 * correctly. Be careful to use rb_thread_call_without_gvl(). If you don't
1606 * provide proper ubf(), your program will not stop for Control+C or other
1607 * shutdown events.
1608 *
1609 * "Check interrupts" on above list means checking asynchronous
1610 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1611 * request, and so on) and calling corresponding procedures
1612 * (such as `trap' for signals, raise an exception for Thread#raise).
1613 * If `func()' finished and received interrupts, you may skip interrupt
1614 * checking. For example, assume the following func() it reads data from file.
1615 *
1616 * read_func(...) {
1617 * // (a) before read
1618 * read(buffer); // (b) reading
1619 * // (c) after read
1620 * }
1621 *
1622 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1623 * `read_func()' and interrupts are checked. However, if an interrupt occurs
1624 * at (c), after *read* operation is completed, checking interrupts is harmful
1625 * because it causes irrevocable side-effect, the read data will vanish. To
1626 * avoid such problem, the `read_func()' should be used with
1627 * `rb_thread_call_without_gvl2()'.
1628 *
1629 * If `rb_thread_call_without_gvl2()' detects interrupt, it returns
1630 * immediately. This function does not show when the execution was interrupted.
1631 * For example, there are 4 possible timing (a), (b), (c) and before calling
1632 * read_func(). You need to record progress of a read_func() and check
1633 * the progress after `rb_thread_call_without_gvl2()'. You may need to call
1634 * `rb_thread_check_ints()' correctly or your program can not process proper
1635 * process such as `trap' and so on.
1636 *
1637 * NOTE: You can not execute most of Ruby C API and touch Ruby
1638 * objects in `func()' and `ubf()', including raising an
1639 * exception, because current thread doesn't acquire GVL
1640 * (it causes synchronization problems). If you need to
1641 * call ruby functions either use rb_thread_call_with_gvl()
1642 * or read source code of C APIs and confirm safety by
1643 * yourself.
1644 *
1645 * NOTE: In short, this API is difficult to use safely. I recommend you
1646 * use other ways if you have. We lack experiences to use this API.
1647 * Please report your problem related on it.
1648 *
1649 * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
1650 * for a short running `func()'. Be sure to benchmark and use this
1651 * mechanism when `func()' consumes enough time.
1652 *
1653 * Safe C API:
1654 * * rb_thread_interrupted() - check interrupt flag
1655 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1656 * they will work without GVL, and may acquire GVL when GC is needed.
1657 */
1658void *
1659rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1660 rb_unblock_function_t *ubf, void *data2)
1661{
1662 return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
1663}
1664
1665void *
1666rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1667 rb_unblock_function_t *ubf, void *data2)
1668{
1669 return rb_nogvl(func, data1, ubf, data2, 0);
1670}
1671
1672VALUE
1673rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
1674{
1675 volatile VALUE val = Qundef; /* shouldn't be used */
1676 rb_execution_context_t * volatile ec = GET_EC();
1677 volatile int saved_errno = 0;
1678 enum ruby_tag_type state;
1679
1680 struct waiting_fd waiting_fd = {
1681 .fd = fd,
1682 .th = rb_ec_thread_ptr(ec)
1683 };
1684
1685 // `errno` is only valid when there is an actual error - but we can't
1686 // extract that from the return value of `func` alone, so we clear any
1687 // prior `errno` value here so that we can later check if it was set by
1688 // `func` or not (as opposed to some previously set value).
1689 errno = 0;
1690
1691 RB_VM_LOCK_ENTER();
1692 {
1693 ccan_list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &waiting_fd.wfd_node);
1694 }
1695 RB_VM_LOCK_LEAVE();
1696
1697 EC_PUSH_TAG(ec);
1698 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1699 BLOCKING_REGION(waiting_fd.th, {
1700 val = func(data1);
1701 saved_errno = errno;
1702 }, ubf_select, waiting_fd.th, FALSE);
1703 }
1704 EC_POP_TAG();
1705
1706 /*
1707 * must be deleted before jump
1708 * this will delete either from waiting_fds or on-stack CCAN_LIST_HEAD(busy)
1709 */
1710 RB_VM_LOCK_ENTER();
1711 {
1712 ccan_list_del(&waiting_fd.wfd_node);
1713 }
1714 RB_VM_LOCK_LEAVE();
1715
1716 if (state) {
1717 EC_JUMP_TAG(ec, state);
1718 }
1719 /* TODO: check func() */
1720 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1721
1722 // If the error was a timeout, we raise a specific exception for that:
1723 if (saved_errno == ETIMEDOUT) {
1724 rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
1725 }
1726
1727 errno = saved_errno;
1728
1729 return val;
1730}
1731
1732/*
1733 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1734 *
1735 * After releasing GVL using
1736 * rb_thread_call_without_gvl() you can not access Ruby values or invoke
1737 * methods. If you need to access Ruby you must use this function
1738 * rb_thread_call_with_gvl().
1739 *
1740 * This function rb_thread_call_with_gvl() does:
1741 * (1) acquire GVL.
1742 * (2) call passed function `func'.
1743 * (3) release GVL.
1744 * (4) return a value which is returned at (2).
1745 *
1746 * NOTE: You should not return Ruby object at (2) because such Object
1747 * will not be marked.
1748 *
1749 * NOTE: If an exception is raised in `func', this function DOES NOT
1750 * protect (catch) the exception. If you have any resources
1751 * which should free before throwing exception, you need use
1752 * rb_protect() in `func' and return a value which represents
1753 * exception was raised.
1754 *
1755 * NOTE: This function should not be called by a thread which was not
1756 * created as Ruby thread (created by Thread.new or so). In other
1757 * words, this function *DOES NOT* associate or convert a NON-Ruby
1758 * thread to a Ruby thread.
1759 */
1760void *
1761rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1762{
1763 rb_thread_t *th = ruby_thread_from_native();
1764 struct rb_blocking_region_buffer *brb;
1765 struct rb_unblock_callback prev_unblock;
1766 void *r;
1767
1768 if (th == 0) {
1769 /* Error has occurred, but we can't use rb_bug()
1770 * because this thread is not Ruby's thread.
1771 * What should we do?
1772 */
1773 bp();
1774 fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1775 exit(EXIT_FAILURE);
1776 }
1777
1778 brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
1779 prev_unblock = th->unblock;
1780
1781 if (brb == 0) {
1782 rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1783 }
1784
1785 blocking_region_end(th, brb);
1786 /* enter to Ruby world: You can access Ruby values, methods and so on. */
1787 r = (*func)(data1);
1788 /* leave from Ruby world: You can not access Ruby values, etc. */
1789 int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1790 RUBY_ASSERT_ALWAYS(released);
1791 return r;
1792}
1793
1794/*
1795 * ruby_thread_has_gvl_p - check if current native thread has GVL.
1796 *
1797 ***
1798 *** This API is EXPERIMENTAL!
1799 *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1800 ***
1801 */
1802
1803int
1804ruby_thread_has_gvl_p(void)
1805{
1806 rb_thread_t *th = ruby_thread_from_native();
1807
1808 if (th && th->blocking_region_buffer == 0) {
1809 return 1;
1810 }
1811 else {
1812 return 0;
1813 }
1814}
1815
1816/*
1817 * call-seq:
1818 * Thread.pass -> nil
1819 *
1820 * Give the thread scheduler a hint to pass execution to another thread.
1821 * A running thread may or may not switch, it depends on OS and processor.
1822 */
1823
1824static VALUE
1825thread_s_pass(VALUE klass)
1826{
1828 return Qnil;
1829}
1830
1831/*****************************************************/
1832
1833/*
1834 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1835 *
1836 * Async events such as an exception thrown by Thread#raise,
1837 * Thread#kill and thread termination (after main thread termination)
1838 * will be queued to th->pending_interrupt_queue.
1839 * - clear: clear the queue.
1840 * - enque: enqueue err object into queue.
1841 * - deque: dequeue err object from queue.
1842 * - active_p: return 1 if the queue should be checked.
1843 *
1844 * All rb_threadptr_pending_interrupt_* functions are called by
1845 * a GVL acquired thread, of course.
1846 * Note that all "rb_" prefix APIs need GVL to call.
1847 */
1848
1849void
1850rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
1851{
1852 rb_ary_clear(th->pending_interrupt_queue);
1853}
1854
1855void
1856rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
1857{
1858 rb_ary_push(th->pending_interrupt_queue, v);
1859 th->pending_interrupt_queue_checked = 0;
1860}
1861
1862static void
1863threadptr_check_pending_interrupt_queue(rb_thread_t *th)
1864{
1865 if (!th->pending_interrupt_queue) {
1866 rb_raise(rb_eThreadError, "uninitialized thread");
1867 }
1868}
1869
1870enum handle_interrupt_timing {
1871 INTERRUPT_NONE,
1872 INTERRUPT_IMMEDIATE,
1873 INTERRUPT_ON_BLOCKING,
1874 INTERRUPT_NEVER
1875};
1876
1877static enum handle_interrupt_timing
1878rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
1879{
1880 VALUE mask;
1881 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
1882 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
1883 VALUE mod;
1884 long i;
1885
1886 for (i=0; i<mask_stack_len; i++) {
1887 mask = mask_stack[mask_stack_len-(i+1)];
1888
1889 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
1890 VALUE klass = mod;
1891 VALUE sym;
1892
1893 if (BUILTIN_TYPE(mod) == T_ICLASS) {
1894 klass = RBASIC(mod)->klass;
1895 }
1896 else if (mod != RCLASS_ORIGIN(mod)) {
1897 continue;
1898 }
1899
1900 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
1901 if (sym == sym_immediate) {
1902 return INTERRUPT_IMMEDIATE;
1903 }
1904 else if (sym == sym_on_blocking) {
1905 return INTERRUPT_ON_BLOCKING;
1906 }
1907 else if (sym == sym_never) {
1908 return INTERRUPT_NEVER;
1909 }
1910 else {
1911 rb_raise(rb_eThreadError, "unknown mask signature");
1912 }
1913 }
1914 }
1915 /* try to next mask */
1916 }
1917 return INTERRUPT_NONE;
1918}
1919
1920static int
1921rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
1922{
1923 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
1924}
1925
1926static int
1927rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
1928{
1929 int i;
1930 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1931 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
1932 if (rb_obj_is_kind_of(e, err)) {
1933 return TRUE;
1934 }
1935 }
1936 return FALSE;
1937}
1938
1939static VALUE
1940rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
1941{
1942#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
1943 int i;
1944
1945 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1946 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
1947
1948 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
1949
1950 switch (mask_timing) {
1951 case INTERRUPT_ON_BLOCKING:
1952 if (timing != INTERRUPT_ON_BLOCKING) {
1953 break;
1954 }
1955 /* fall through */
1956 case INTERRUPT_NONE: /* default: IMMEDIATE */
1957 case INTERRUPT_IMMEDIATE:
1958 rb_ary_delete_at(th->pending_interrupt_queue, i);
1959 return err;
1960 case INTERRUPT_NEVER:
1961 break;
1962 }
1963 }
1964
1965 th->pending_interrupt_queue_checked = 1;
1966 return Qundef;
1967#else
1968 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
1969 if (rb_threadptr_pending_interrupt_empty_p(th)) {
1970 th->pending_interrupt_queue_checked = 1;
1971 }
1972 return err;
1973#endif
1974}
1975
1976static int
1977threadptr_pending_interrupt_active_p(rb_thread_t *th)
1978{
1979 /*
1980 * For optimization, we don't check async errinfo queue
1981 * if the queue and the thread interrupt mask were not changed
1982 * since last check.
1983 */
1984 if (th->pending_interrupt_queue_checked) {
1985 return 0;
1986 }
1987
1988 if (rb_threadptr_pending_interrupt_empty_p(th)) {
1989 return 0;
1990 }
1991
1992 return 1;
1993}
1994
1995static int
1996handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
1997{
1998 VALUE *maskp = (VALUE *)args;
1999
2000 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2001 rb_raise(rb_eArgError, "unknown mask signature");
2002 }
2003
2004 if (!*maskp) {
2005 *maskp = rb_ident_hash_new();
2006 }
2007 rb_hash_aset(*maskp, key, val);
2008
2009 return ST_CONTINUE;
2010}
2011
2012/*
2013 * call-seq:
2014 * Thread.handle_interrupt(hash) { ... } -> result of the block
2015 *
2016 * Changes asynchronous interrupt timing.
2017 *
2018 * _interrupt_ means asynchronous event and corresponding procedure
2019 * by Thread#raise, Thread#kill, signal trap (not supported yet)
2020 * and main thread termination (if main thread terminates, then all
2021 * other thread will be killed).
2022 *
2023 * The given +hash+ has pairs like <code>ExceptionClass =>
2024 * :TimingSymbol</code>. Where the ExceptionClass is the interrupt handled by
2025 * the given block. The TimingSymbol can be one of the following symbols:
2026 *
2027 * [+:immediate+] Invoke interrupts immediately.
2028 * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
2029 * [+:never+] Never invoke all interrupts.
2030 *
2031 * _BlockingOperation_ means that the operation will block the calling thread,
2032 * such as read and write. On CRuby implementation, _BlockingOperation_ is any
2033 * operation executed without GVL.
2034 *
2035 * Masked asynchronous interrupts are delayed until they are enabled.
2036 * This method is similar to sigprocmask(3).
2037 *
2038 * === NOTE
2039 *
2040 * Asynchronous interrupts are difficult to use.
2041 *
2042 * If you need to communicate between threads, please consider to use another way such as Queue.
2043 *
2044 * Or use them with deep understanding about this method.
2045 *
2046 * === Usage
2047 *
2048 * In this example, we can guard from Thread#raise exceptions.
2049 *
2050 * Using the +:never+ TimingSymbol the RuntimeError exception will always be
2051 * ignored in the first block of the main thread. In the second
2052 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2053 *
2054 * th = Thread.new do
2055 * Thread.handle_interrupt(RuntimeError => :never) {
2056 * begin
2057 * # You can write resource allocation code safely.
2058 * Thread.handle_interrupt(RuntimeError => :immediate) {
2059 * # ...
2060 * }
2061 * ensure
2062 * # You can write resource deallocation code safely.
2063 * end
2064 * }
2065 * end
2066 * Thread.pass
2067 * # ...
2068 * th.raise "stop"
2069 *
2070 * While we are ignoring the RuntimeError exception, it's safe to write our
2071 * resource allocation code. Then, the ensure block is where we can safely
2072 * deallocate your resources.
2073 *
2074 * ==== Guarding from Timeout::Error
2075 *
2076 * In the next example, we will guard from the Timeout::Error exception. This
2077 * will help prevent from leaking resources when Timeout::Error exceptions occur
2078 * during normal ensure clause. For this example we use the help of the
2079 * standard library Timeout, from lib/timeout.rb
2080 *
2081 * require 'timeout'
2082 * Thread.handle_interrupt(Timeout::Error => :never) {
2083 * timeout(10){
2084 * # Timeout::Error doesn't occur here
2085 * Thread.handle_interrupt(Timeout::Error => :on_blocking) {
2086 * # possible to be killed by Timeout::Error
2087 * # while blocking operation
2088 * }
2089 * # Timeout::Error doesn't occur here
2090 * }
2091 * }
2092 *
2093 * In the first part of the +timeout+ block, we can rely on Timeout::Error being
2094 * ignored. Then in the <code>Timeout::Error => :on_blocking</code> block, any
2095 * operation that will block the calling thread is susceptible to a
2096 * Timeout::Error exception being raised.
2097 *
2098 * ==== Stack control settings
2099 *
2100 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2101 * to control more than one ExceptionClass and TimingSymbol at a time.
2102 *
2103 * Thread.handle_interrupt(FooError => :never) {
2104 * Thread.handle_interrupt(BarError => :never) {
2105 * # FooError and BarError are prohibited.
2106 * }
2107 * }
2108 *
2109 * ==== Inheritance with ExceptionClass
2110 *
2111 * All exceptions inherited from the ExceptionClass parameter will be considered.
2112 *
2113 * Thread.handle_interrupt(Exception => :never) {
2114 * # all exceptions inherited from Exception are prohibited.
2115 * }
2116 *
2117 * For handling all interrupts, use +Object+ and not +Exception+
2118 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2119 */
2120static VALUE
2121rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2122{
2123 VALUE mask;
2124 rb_execution_context_t * volatile ec = GET_EC();
2125 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2126 volatile VALUE r = Qnil;
2127 enum ruby_tag_type state;
2128
2129 if (!rb_block_given_p()) {
2130 rb_raise(rb_eArgError, "block is needed.");
2131 }
2132
2133 mask = 0;
2134 mask_arg = rb_to_hash_type(mask_arg);
2135 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2136 if (!mask) {
2137 return rb_yield(Qnil);
2138 }
2139 OBJ_FREEZE_RAW(mask);
2140 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2141 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2142 th->pending_interrupt_queue_checked = 0;
2143 RUBY_VM_SET_INTERRUPT(th->ec);
2144 }
2145
2146 EC_PUSH_TAG(th->ec);
2147 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2148 r = rb_yield(Qnil);
2149 }
2150 EC_POP_TAG();
2151
2152 rb_ary_pop(th->pending_interrupt_mask_stack);
2153 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2154 th->pending_interrupt_queue_checked = 0;
2155 RUBY_VM_SET_INTERRUPT(th->ec);
2156 }
2157
2158 RUBY_VM_CHECK_INTS(th->ec);
2159
2160 if (state) {
2161 EC_JUMP_TAG(th->ec, state);
2162 }
2163
2164 return r;
2165}
2166
2167/*
2168 * call-seq:
2169 * target_thread.pending_interrupt?(error = nil) -> true/false
2170 *
2171 * Returns whether or not the asynchronous queue is empty for the target thread.
2172 *
2173 * If +error+ is given, then check only for +error+ type deferred events.
2174 *
2175 * See ::pending_interrupt? for more information.
2176 */
2177static VALUE
2178rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2179{
2180 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2181
2182 if (!target_th->pending_interrupt_queue) {
2183 return Qfalse;
2184 }
2185 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2186 return Qfalse;
2187 }
2188 if (rb_check_arity(argc, 0, 1)) {
2189 VALUE err = argv[0];
2190 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2191 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2192 }
2193 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2194 }
2195 else {
2196 return Qtrue;
2197 }
2198}
2199
2200/*
2201 * call-seq:
2202 * Thread.pending_interrupt?(error = nil) -> true/false
2203 *
2204 * Returns whether or not the asynchronous queue is empty.
2205 *
2206 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2207 * this method can be used to determine if there are any deferred events.
2208 *
2209 * If you find this method returns true, then you may finish +:never+ blocks.
2210 *
2211 * For example, the following method processes deferred asynchronous events
2212 * immediately.
2213 *
2214 * def Thread.kick_interrupt_immediately
2215 * Thread.handle_interrupt(Object => :immediate) {
2216 * Thread.pass
2217 * }
2218 * end
2219 *
2220 * If +error+ is given, then check only for +error+ type deferred events.
2221 *
2222 * === Usage
2223 *
2224 * th = Thread.new{
2225 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2226 * while true
2227 * ...
2228 * # reach safe point to invoke interrupt
2229 * if Thread.pending_interrupt?
2230 * Thread.handle_interrupt(Object => :immediate){}
2231 * end
2232 * ...
2233 * end
2234 * }
2235 * }
2236 * ...
2237 * th.raise # stop thread
2238 *
2239 * This example can also be written as the following, which you should use to
2240 * avoid asynchronous interrupts.
2241 *
2242 * flag = true
2243 * th = Thread.new{
2244 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2245 * while true
2246 * ...
2247 * # reach safe point to invoke interrupt
2248 * break if flag == false
2249 * ...
2250 * end
2251 * }
2252 * }
2253 * ...
2254 * flag = false # stop thread
2255 */
2256
2257static VALUE
2258rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2259{
2260 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2261}
2262
2263NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2264
2265static void
2266rb_threadptr_to_kill(rb_thread_t *th)
2267{
2268 rb_threadptr_pending_interrupt_clear(th);
2269 th->status = THREAD_RUNNABLE;
2270 th->to_kill = 1;
2271 th->ec->errinfo = INT2FIX(TAG_FATAL);
2272 EC_JUMP_TAG(th->ec, TAG_FATAL);
2273}
2274
2275static inline rb_atomic_t
2276threadptr_get_interrupts(rb_thread_t *th)
2277{
2278 rb_execution_context_t *ec = th->ec;
2279 rb_atomic_t interrupt;
2280 rb_atomic_t old;
2281
2282 do {
2283 interrupt = ec->interrupt_flag;
2284 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2285 } while (old != interrupt);
2286 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2287}
2288
2289#if USE_MJIT
2290// process.c
2291extern bool mjit_waitpid_finished;
2292extern int mjit_waitpid_status;
2293#endif
2294
2295MJIT_FUNC_EXPORTED int
2296rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2297{
2298 rb_atomic_t interrupt;
2299 int postponed_job_interrupt = 0;
2300 int ret = FALSE;
2301
2302 if (th->ec->raised_flag) return ret;
2303
2304 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2305 int sig;
2306 int timer_interrupt;
2307 int pending_interrupt;
2308 int trap_interrupt;
2309 int terminate_interrupt;
2310
2311 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2312 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2313 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2314 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2315 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2316
2317 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2318 RB_VM_LOCK_ENTER();
2319 RB_VM_LOCK_LEAVE();
2320 }
2321
2322 if (postponed_job_interrupt) {
2323 rb_postponed_job_flush(th->vm);
2324 }
2325
2326 /* signal handling */
2327 if (trap_interrupt && (th == th->vm->ractor.main_thread)) {
2328 enum rb_thread_status prev_status = th->status;
2329 int sigwait_fd = rb_sigwait_fd_get(th);
2330
2331 if (sigwait_fd >= 0) {
2332 (void)consume_communication_pipe(sigwait_fd);
2333 ruby_sigchld_handler(th->vm);
2334 rb_sigwait_fd_put(th, sigwait_fd);
2335 rb_sigwait_fd_migrate(th->vm);
2336 }
2337 th->status = THREAD_RUNNABLE;
2338 while ((sig = rb_get_next_signal()) != 0) {
2339 ret |= rb_signal_exec(th, sig);
2340 }
2341 th->status = prev_status;
2342 }
2343
2344#if USE_MJIT
2345 // Handle waitpid_signal for MJIT issued by ruby_sigchld_handler. This needs to be done
2346 // outside ruby_sigchld_handler to avoid recursively relying on the SIGCHLD handler.
2347 if (mjit_waitpid_finished && th == th->vm->ractor.main_thread) {
2348 mjit_waitpid_finished = false;
2349 mjit_notify_waitpid(WIFEXITED(mjit_waitpid_status) ? WEXITSTATUS(mjit_waitpid_status) : -1);
2350 }
2351#endif
2352
2353 /* exception from another thread */
2354 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2355 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2356 RUBY_DEBUG_LOG("err:%"PRIdVALUE"\n", err);
2357 ret = TRUE;
2358
2359 if (UNDEF_P(err)) {
2360 /* no error */
2361 }
2362 else if (err == eKillSignal /* Thread#kill received */ ||
2363 err == eTerminateSignal /* Terminate thread */ ||
2364 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2365 terminate_interrupt = 1;
2366 }
2367 else {
2368 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2369 /* the only special exception to be queued across thread */
2370 err = ruby_vm_special_exception_copy(err);
2371 }
2372 /* set runnable if th was slept. */
2373 if (th->status == THREAD_STOPPED ||
2374 th->status == THREAD_STOPPED_FOREVER)
2375 th->status = THREAD_RUNNABLE;
2376 rb_exc_raise(err);
2377 }
2378 }
2379
2380 if (terminate_interrupt) {
2381 rb_threadptr_to_kill(th);
2382 }
2383
2384 if (timer_interrupt) {
2385 uint32_t limits_us = TIME_QUANTUM_USEC;
2386
2387 if (th->priority > 0)
2388 limits_us <<= th->priority;
2389 else
2390 limits_us >>= -th->priority;
2391
2392 if (th->status == THREAD_RUNNABLE)
2393 th->running_time_us += TIME_QUANTUM_USEC;
2394
2395 VM_ASSERT(th->ec->cfp);
2396 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2397 0, 0, 0, Qundef);
2398
2399 rb_thread_schedule_limits(limits_us);
2400 }
2401 }
2402 return ret;
2403}
2404
2405void
2406rb_thread_execute_interrupts(VALUE thval)
2407{
2408 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2409}
2410
2411static void
2412rb_threadptr_ready(rb_thread_t *th)
2413{
2414 rb_threadptr_interrupt(th);
2415}
2416
2417static VALUE
2418rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2419{
2420 VALUE exc;
2421
2422 if (rb_threadptr_dead(target_th)) {
2423 return Qnil;
2424 }
2425
2426 if (argc == 0) {
2427 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2428 }
2429 else {
2430 exc = rb_make_exception(argc, argv);
2431 }
2432
2433 /* making an exception object can switch thread,
2434 so we need to check thread deadness again */
2435 if (rb_threadptr_dead(target_th)) {
2436 return Qnil;
2437 }
2438
2439 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2440 rb_threadptr_pending_interrupt_enque(target_th, exc);
2441 rb_threadptr_interrupt(target_th);
2442 return Qnil;
2443}
2444
2445void
2446rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2447{
2448 VALUE argv[2];
2449
2450 argv[0] = rb_eSignal;
2451 argv[1] = INT2FIX(sig);
2452 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2453}
2454
2455void
2456rb_threadptr_signal_exit(rb_thread_t *th)
2457{
2458 VALUE argv[2];
2459
2460 argv[0] = rb_eSystemExit;
2461 argv[1] = rb_str_new2("exit");
2462
2463 // TODO: check signal raise deliverly
2464 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2465}
2466
2467int
2468rb_ec_set_raised(rb_execution_context_t *ec)
2469{
2470 if (ec->raised_flag & RAISED_EXCEPTION) {
2471 return 1;
2472 }
2473 ec->raised_flag |= RAISED_EXCEPTION;
2474 return 0;
2475}
2476
2477int
2478rb_ec_reset_raised(rb_execution_context_t *ec)
2479{
2480 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2481 return 0;
2482 }
2483 ec->raised_flag &= ~RAISED_EXCEPTION;
2484 return 1;
2485}
2486
2487int
2488rb_notify_fd_close(int fd, struct ccan_list_head *busy)
2489{
2490 rb_vm_t *vm = GET_THREAD()->vm;
2491 struct waiting_fd *wfd = 0, *next;
2492
2493 RB_VM_LOCK_ENTER();
2494 {
2495 ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2496 if (wfd->fd == fd) {
2497 rb_thread_t *th = wfd->th;
2498 VALUE err;
2499
2500 ccan_list_del(&wfd->wfd_node);
2501 ccan_list_add(busy, &wfd->wfd_node);
2502
2503 err = th->vm->special_exceptions[ruby_error_stream_closed];
2504 rb_threadptr_pending_interrupt_enque(th, err);
2505 rb_threadptr_interrupt(th);
2506 }
2507 }
2508 }
2509 RB_VM_LOCK_LEAVE();
2510
2511 return !ccan_list_empty(busy);
2512}
2513
2514void
2516{
2517 struct ccan_list_head busy;
2518
2519 ccan_list_head_init(&busy);
2520 if (rb_notify_fd_close(fd, &busy)) {
2521 do rb_thread_schedule(); while (!ccan_list_empty(&busy));
2522 }
2523}
2524
2525/*
2526 * call-seq:
2527 * thr.raise
2528 * thr.raise(string)
2529 * thr.raise(exception [, string [, array]])
2530 *
2531 * Raises an exception from the given thread. The caller does not have to be
2532 * +thr+. See Kernel#raise for more information.
2533 *
2534 * Thread.abort_on_exception = true
2535 * a = Thread.new { sleep(200) }
2536 * a.raise("Gotcha")
2537 *
2538 * This will produce:
2539 *
2540 * prog.rb:3: Gotcha (RuntimeError)
2541 * from prog.rb:2:in `initialize'
2542 * from prog.rb:2:in `new'
2543 * from prog.rb:2
2544 */
2545
2546static VALUE
2547thread_raise_m(int argc, VALUE *argv, VALUE self)
2548{
2549 rb_thread_t *target_th = rb_thread_ptr(self);
2550 const rb_thread_t *current_th = GET_THREAD();
2551
2552 threadptr_check_pending_interrupt_queue(target_th);
2553 rb_threadptr_raise(target_th, argc, argv);
2554
2555 /* To perform Thread.current.raise as Kernel.raise */
2556 if (current_th == target_th) {
2557 RUBY_VM_CHECK_INTS(target_th->ec);
2558 }
2559 return Qnil;
2560}
2561
2562
2563/*
2564 * call-seq:
2565 * thr.exit -> thr
2566 * thr.kill -> thr
2567 * thr.terminate -> thr
2568 *
2569 * Terminates +thr+ and schedules another thread to be run, returning
2570 * the terminated Thread. If this is the main thread, or the last
2571 * thread, exits the process.
2572 */
2573
2574VALUE
2576{
2577 rb_thread_t *target_th = rb_thread_ptr(thread);
2578
2579 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2580 return thread;
2581 }
2582 if (target_th == target_th->vm->ractor.main_thread) {
2583 rb_exit(EXIT_SUCCESS);
2584 }
2585
2586 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2587
2588 if (target_th == GET_THREAD()) {
2589 /* kill myself immediately */
2590 rb_threadptr_to_kill(target_th);
2591 }
2592 else {
2593 threadptr_check_pending_interrupt_queue(target_th);
2594 rb_threadptr_pending_interrupt_enque(target_th, eKillSignal);
2595 rb_threadptr_interrupt(target_th);
2596 }
2597
2598 return thread;
2599}
2600
2601int
2602rb_thread_to_be_killed(VALUE thread)
2603{
2604 rb_thread_t *target_th = rb_thread_ptr(thread);
2605
2606 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2607 return TRUE;
2608 }
2609 return FALSE;
2610}
2611
2612/*
2613 * call-seq:
2614 * Thread.kill(thread) -> thread
2615 *
2616 * Causes the given +thread+ to exit, see also Thread::exit.
2617 *
2618 * count = 0
2619 * a = Thread.new { loop { count += 1 } }
2620 * sleep(0.1) #=> 0
2621 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2622 * count #=> 93947
2623 * a.alive? #=> false
2624 */
2625
2626static VALUE
2627rb_thread_s_kill(VALUE obj, VALUE th)
2628{
2629 return rb_thread_kill(th);
2630}
2631
2632
2633/*
2634 * call-seq:
2635 * Thread.exit -> thread
2636 *
2637 * Terminates the currently running thread and schedules another thread to be
2638 * run.
2639 *
2640 * If this thread is already marked to be killed, ::exit returns the Thread.
2641 *
2642 * If this is the main thread, or the last thread, exit the process.
2643 */
2644
2645static VALUE
2646rb_thread_exit(VALUE _)
2647{
2648 rb_thread_t *th = GET_THREAD();
2649 return rb_thread_kill(th->self);
2650}
2651
2652
2653/*
2654 * call-seq:
2655 * thr.wakeup -> thr
2656 *
2657 * Marks a given thread as eligible for scheduling, however it may still
2658 * remain blocked on I/O.
2659 *
2660 * *Note:* This does not invoke the scheduler, see #run for more information.
2661 *
2662 * c = Thread.new { Thread.stop; puts "hey!" }
2663 * sleep 0.1 while c.status!='sleep'
2664 * c.wakeup
2665 * c.join
2666 * #=> "hey!"
2667 */
2668
2669VALUE
2671{
2672 if (!RTEST(rb_thread_wakeup_alive(thread))) {
2673 rb_raise(rb_eThreadError, "killed thread");
2674 }
2675 return thread;
2676}
2677
2678VALUE
2680{
2681 rb_thread_t *target_th = rb_thread_ptr(thread);
2682 if (target_th->status == THREAD_KILLED) return Qnil;
2683
2684 rb_threadptr_ready(target_th);
2685
2686 if (target_th->status == THREAD_STOPPED ||
2687 target_th->status == THREAD_STOPPED_FOREVER) {
2688 target_th->status = THREAD_RUNNABLE;
2689 }
2690
2691 return thread;
2692}
2693
2694
2695/*
2696 * call-seq:
2697 * thr.run -> thr
2698 *
2699 * Wakes up +thr+, making it eligible for scheduling.
2700 *
2701 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2702 * sleep 0.1 while a.status!='sleep'
2703 * puts "Got here"
2704 * a.run
2705 * a.join
2706 *
2707 * This will produce:
2708 *
2709 * a
2710 * Got here
2711 * c
2712 *
2713 * See also the instance method #wakeup.
2714 */
2715
2716VALUE
2718{
2719 rb_thread_wakeup(thread);
2721 return thread;
2722}
2723
2724
2725VALUE
2727{
2728 if (rb_thread_alone()) {
2730 "stopping only thread\n\tnote: use sleep to stop forever");
2731 }
2733 return Qnil;
2734}
2735
2736/*
2737 * call-seq:
2738 * Thread.stop -> nil
2739 *
2740 * Stops execution of the current thread, putting it into a ``sleep'' state,
2741 * and schedules execution of another thread.
2742 *
2743 * a = Thread.new { print "a"; Thread.stop; print "c" }
2744 * sleep 0.1 while a.status!='sleep'
2745 * print "b"
2746 * a.run
2747 * a.join
2748 * #=> "abc"
2749 */
2750
2751static VALUE
2752thread_stop(VALUE _)
2753{
2754 return rb_thread_stop();
2755}
2756
2757/********************************************************************/
2758
2759VALUE
2760rb_thread_list(void)
2761{
2762 // TODO
2763 return rb_ractor_thread_list(GET_RACTOR());
2764}
2765
2766/*
2767 * call-seq:
2768 * Thread.list -> array
2769 *
2770 * Returns an array of Thread objects for all threads that are either runnable
2771 * or stopped.
2772 *
2773 * Thread.new { sleep(200) }
2774 * Thread.new { 1000000.times {|i| i*i } }
2775 * Thread.new { Thread.stop }
2776 * Thread.list.each {|t| p t}
2777 *
2778 * This will produce:
2779 *
2780 * #<Thread:0x401b3e84 sleep>
2781 * #<Thread:0x401b3f38 run>
2782 * #<Thread:0x401b3fb0 sleep>
2783 * #<Thread:0x401bdf4c run>
2784 */
2785
2786static VALUE
2787thread_list(VALUE _)
2788{
2789 return rb_thread_list();
2790}
2791
2792VALUE
2794{
2795 return GET_THREAD()->self;
2796}
2797
2798/*
2799 * call-seq:
2800 * Thread.current -> thread
2801 *
2802 * Returns the currently executing thread.
2803 *
2804 * Thread.current #=> #<Thread:0x401bdf4c run>
2805 */
2806
2807static VALUE
2808thread_s_current(VALUE klass)
2809{
2810 return rb_thread_current();
2811}
2812
2813VALUE
2815{
2816 return GET_RACTOR()->threads.main->self;
2817}
2818
2819/*
2820 * call-seq:
2821 * Thread.main -> thread
2822 *
2823 * Returns the main thread.
2824 */
2825
2826static VALUE
2827rb_thread_s_main(VALUE klass)
2828{
2829 return rb_thread_main();
2830}
2831
2832
2833/*
2834 * call-seq:
2835 * Thread.abort_on_exception -> true or false
2836 *
2837 * Returns the status of the global ``abort on exception'' condition.
2838 *
2839 * The default is +false+.
2840 *
2841 * When set to +true+, if any thread is aborted by an exception, the
2842 * raised exception will be re-raised in the main thread.
2843 *
2844 * Can also be specified by the global $DEBUG flag or command line option
2845 * +-d+.
2846 *
2847 * See also ::abort_on_exception=.
2848 *
2849 * There is also an instance level method to set this for a specific thread,
2850 * see #abort_on_exception.
2851 */
2852
2853static VALUE
2854rb_thread_s_abort_exc(VALUE _)
2855{
2856 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
2857}
2858
2859
2860/*
2861 * call-seq:
2862 * Thread.abort_on_exception= boolean -> true or false
2863 *
2864 * When set to +true+, if any thread is aborted by an exception, the
2865 * raised exception will be re-raised in the main thread.
2866 * Returns the new state.
2867 *
2868 * Thread.abort_on_exception = true
2869 * t1 = Thread.new do
2870 * puts "In new thread"
2871 * raise "Exception from thread"
2872 * end
2873 * sleep(1)
2874 * puts "not reached"
2875 *
2876 * This will produce:
2877 *
2878 * In new thread
2879 * prog.rb:4: Exception from thread (RuntimeError)
2880 * from prog.rb:2:in `initialize'
2881 * from prog.rb:2:in `new'
2882 * from prog.rb:2
2883 *
2884 * See also ::abort_on_exception.
2885 *
2886 * There is also an instance level method to set this for a specific thread,
2887 * see #abort_on_exception=.
2888 */
2889
2890static VALUE
2891rb_thread_s_abort_exc_set(VALUE self, VALUE val)
2892{
2893 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
2894 return val;
2895}
2896
2897
2898/*
2899 * call-seq:
2900 * thr.abort_on_exception -> true or false
2901 *
2902 * Returns the status of the thread-local ``abort on exception'' condition for
2903 * this +thr+.
2904 *
2905 * The default is +false+.
2906 *
2907 * See also #abort_on_exception=.
2908 *
2909 * There is also a class level method to set this for all threads, see
2910 * ::abort_on_exception.
2911 */
2912
2913static VALUE
2914rb_thread_abort_exc(VALUE thread)
2915{
2916 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
2917}
2918
2919
2920/*
2921 * call-seq:
2922 * thr.abort_on_exception= boolean -> true or false
2923 *
2924 * When set to +true+, if this +thr+ is aborted by an exception, the
2925 * raised exception will be re-raised in the main thread.
2926 *
2927 * See also #abort_on_exception.
2928 *
2929 * There is also a class level method to set this for all threads, see
2930 * ::abort_on_exception=.
2931 */
2932
2933static VALUE
2934rb_thread_abort_exc_set(VALUE thread, VALUE val)
2935{
2936 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
2937 return val;
2938}
2939
2940
2941/*
2942 * call-seq:
2943 * Thread.report_on_exception -> true or false
2944 *
2945 * Returns the status of the global ``report on exception'' condition.
2946 *
2947 * The default is +true+ since Ruby 2.5.
2948 *
2949 * All threads created when this flag is true will report
2950 * a message on $stderr if an exception kills the thread.
2951 *
2952 * Thread.new { 1.times { raise } }
2953 *
2954 * will produce this output on $stderr:
2955 *
2956 * #<Thread:...> terminated with exception (report_on_exception is true):
2957 * Traceback (most recent call last):
2958 * 2: from -e:1:in `block in <main>'
2959 * 1: from -e:1:in `times'
2960 *
2961 * This is done to catch errors in threads early.
2962 * In some cases, you might not want this output.
2963 * There are multiple ways to avoid the extra output:
2964 *
2965 * * If the exception is not intended, the best is to fix the cause of
2966 * the exception so it does not happen anymore.
2967 * * If the exception is intended, it might be better to rescue it closer to
2968 * where it is raised rather then let it kill the Thread.
2969 * * If it is guaranteed the Thread will be joined with Thread#join or
2970 * Thread#value, then it is safe to disable this report with
2971 * <code>Thread.current.report_on_exception = false</code>
2972 * when starting the Thread.
2973 * However, this might handle the exception much later, or not at all
2974 * if the Thread is never joined due to the parent thread being blocked, etc.
2975 *
2976 * See also ::report_on_exception=.
2977 *
2978 * There is also an instance level method to set this for a specific thread,
2979 * see #report_on_exception=.
2980 *
2981 */
2982
2983static VALUE
2984rb_thread_s_report_exc(VALUE _)
2985{
2986 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
2987}
2988
2989
2990/*
2991 * call-seq:
2992 * Thread.report_on_exception= boolean -> true or false
2993 *
2994 * Returns the new state.
2995 * When set to +true+, all threads created afterwards will inherit the
2996 * condition and report a message on $stderr if an exception kills a thread:
2997 *
2998 * Thread.report_on_exception = true
2999 * t1 = Thread.new do
3000 * puts "In new thread"
3001 * raise "Exception from thread"
3002 * end
3003 * sleep(1)
3004 * puts "In the main thread"
3005 *
3006 * This will produce:
3007 *
3008 * In new thread
3009 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3010 * Traceback (most recent call last):
3011 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3012 * In the main thread
3013 *
3014 * See also ::report_on_exception.
3015 *
3016 * There is also an instance level method to set this for a specific thread,
3017 * see #report_on_exception=.
3018 */
3019
3020static VALUE
3021rb_thread_s_report_exc_set(VALUE self, VALUE val)
3022{
3023 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3024 return val;
3025}
3026
3027
3028/*
3029 * call-seq:
3030 * Thread.ignore_deadlock -> true or false
3031 *
3032 * Returns the status of the global ``ignore deadlock'' condition.
3033 * The default is +false+, so that deadlock conditions are not ignored.
3034 *
3035 * See also ::ignore_deadlock=.
3036 *
3037 */
3038
3039static VALUE
3040rb_thread_s_ignore_deadlock(VALUE _)
3041{
3042 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3043}
3044
3045
3046/*
3047 * call-seq:
3048 * Thread.ignore_deadlock = boolean -> true or false
3049 *
3050 * Returns the new state.
3051 * When set to +true+, the VM will not check for deadlock conditions.
3052 * It is only useful to set this if your application can break a
3053 * deadlock condition via some other means, such as a signal.
3054 *
3055 * Thread.ignore_deadlock = true
3056 * queue = Thread::Queue.new
3057 *
3058 * trap(:SIGUSR1){queue.push "Received signal"}
3059 *
3060 * # raises fatal error unless ignoring deadlock
3061 * puts queue.pop
3062 *
3063 * See also ::ignore_deadlock.
3064 */
3065
3066static VALUE
3067rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3068{
3069 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3070 return val;
3071}
3072
3073
3074/*
3075 * call-seq:
3076 * thr.report_on_exception -> true or false
3077 *
3078 * Returns the status of the thread-local ``report on exception'' condition for
3079 * this +thr+.
3080 *
3081 * The default value when creating a Thread is the value of
3082 * the global flag Thread.report_on_exception.
3083 *
3084 * See also #report_on_exception=.
3085 *
3086 * There is also a class level method to set this for all new threads, see
3087 * ::report_on_exception=.
3088 */
3089
3090static VALUE
3091rb_thread_report_exc(VALUE thread)
3092{
3093 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3094}
3095
3096
3097/*
3098 * call-seq:
3099 * thr.report_on_exception= boolean -> true or false
3100 *
3101 * When set to +true+, a message is printed on $stderr if an exception
3102 * kills this +thr+. See ::report_on_exception for details.
3103 *
3104 * See also #report_on_exception.
3105 *
3106 * There is also a class level method to set this for all new threads, see
3107 * ::report_on_exception=.
3108 */
3109
3110static VALUE
3111rb_thread_report_exc_set(VALUE thread, VALUE val)
3112{
3113 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3114 return val;
3115}
3116
3117
3118/*
3119 * call-seq:
3120 * thr.group -> thgrp or nil
3121 *
3122 * Returns the ThreadGroup which contains the given thread.
3123 *
3124 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3125 */
3126
3127VALUE
3128rb_thread_group(VALUE thread)
3129{
3130 return rb_thread_ptr(thread)->thgroup;
3131}
3132
3133static const char *
3134thread_status_name(rb_thread_t *th, int detail)
3135{
3136 switch (th->status) {
3137 case THREAD_RUNNABLE:
3138 return th->to_kill ? "aborting" : "run";
3139 case THREAD_STOPPED_FOREVER:
3140 if (detail) return "sleep_forever";
3141 case THREAD_STOPPED:
3142 return "sleep";
3143 case THREAD_KILLED:
3144 return "dead";
3145 default:
3146 return "unknown";
3147 }
3148}
3149
3150static int
3151rb_threadptr_dead(rb_thread_t *th)
3152{
3153 return th->status == THREAD_KILLED;
3154}
3155
3156
3157/*
3158 * call-seq:
3159 * thr.status -> string, false or nil
3160 *
3161 * Returns the status of +thr+.
3162 *
3163 * [<tt>"sleep"</tt>]
3164 * Returned if this thread is sleeping or waiting on I/O
3165 * [<tt>"run"</tt>]
3166 * When this thread is executing
3167 * [<tt>"aborting"</tt>]
3168 * If this thread is aborting
3169 * [+false+]
3170 * When this thread is terminated normally
3171 * [+nil+]
3172 * If terminated with an exception.
3173 *
3174 * a = Thread.new { raise("die now") }
3175 * b = Thread.new { Thread.stop }
3176 * c = Thread.new { Thread.exit }
3177 * d = Thread.new { sleep }
3178 * d.kill #=> #<Thread:0x401b3678 aborting>
3179 * a.status #=> nil
3180 * b.status #=> "sleep"
3181 * c.status #=> false
3182 * d.status #=> "aborting"
3183 * Thread.current.status #=> "run"
3184 *
3185 * See also the instance methods #alive? and #stop?
3186 */
3187
3188static VALUE
3189rb_thread_status(VALUE thread)
3190{
3191 rb_thread_t *target_th = rb_thread_ptr(thread);
3192
3193 if (rb_threadptr_dead(target_th)) {
3194 if (!NIL_P(target_th->ec->errinfo) &&
3195 !FIXNUM_P(target_th->ec->errinfo)) {
3196 return Qnil;
3197 }
3198 else {
3199 return Qfalse;
3200 }
3201 }
3202 else {
3203 return rb_str_new2(thread_status_name(target_th, FALSE));
3204 }
3205}
3206
3207
3208/*
3209 * call-seq:
3210 * thr.alive? -> true or false
3211 *
3212 * Returns +true+ if +thr+ is running or sleeping.
3213 *
3214 * thr = Thread.new { }
3215 * thr.join #=> #<Thread:0x401b3fb0 dead>
3216 * Thread.current.alive? #=> true
3217 * thr.alive? #=> false
3218 *
3219 * See also #stop? and #status.
3220 */
3221
3222static VALUE
3223rb_thread_alive_p(VALUE thread)
3224{
3225 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3226}
3227
3228/*
3229 * call-seq:
3230 * thr.stop? -> true or false
3231 *
3232 * Returns +true+ if +thr+ is dead or sleeping.
3233 *
3234 * a = Thread.new { Thread.stop }
3235 * b = Thread.current
3236 * a.stop? #=> true
3237 * b.stop? #=> false
3238 *
3239 * See also #alive? and #status.
3240 */
3241
3242static VALUE
3243rb_thread_stop_p(VALUE thread)
3244{
3245 rb_thread_t *th = rb_thread_ptr(thread);
3246
3247 if (rb_threadptr_dead(th)) {
3248 return Qtrue;
3249 }
3250 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3251}
3252
3253/*
3254 * call-seq:
3255 * thr.name -> string
3256 *
3257 * show the name of the thread.
3258 */
3259
3260static VALUE
3261rb_thread_getname(VALUE thread)
3262{
3263 return rb_thread_ptr(thread)->name;
3264}
3265
3266/*
3267 * call-seq:
3268 * thr.name=(name) -> string
3269 *
3270 * set given name to the ruby thread.
3271 * On some platform, it may set the name to pthread and/or kernel.
3272 */
3273
3274static VALUE
3275rb_thread_setname(VALUE thread, VALUE name)
3276{
3277 rb_thread_t *target_th = rb_thread_ptr(thread);
3278
3279 if (!NIL_P(name)) {
3280 rb_encoding *enc;
3281 StringValueCStr(name);
3282 enc = rb_enc_get(name);
3283 if (!rb_enc_asciicompat(enc)) {
3284 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3285 rb_enc_name(enc));
3286 }
3287 name = rb_str_new_frozen(name);
3288 }
3289 target_th->name = name;
3290 if (threadptr_initialized(target_th)) {
3291 native_set_another_thread_name(target_th->nt->thread_id, name);
3292 }
3293 return name;
3294}
3295
3296#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3297/*
3298 * call-seq:
3299 * thr.native_thread_id -> integer
3300 *
3301 * Return the native thread ID which is used by the Ruby thread.
3302 *
3303 * The ID depends on the OS. (not POSIX thread ID returned by pthread_self(3))
3304 * * On Linux it is TID returned by gettid(2).
3305 * * On macOS it is the system-wide unique integral ID of thread returned
3306 * by pthread_threadid_np(3).
3307 * * On FreeBSD it is the unique integral ID of the thread returned by
3308 * pthread_getthreadid_np(3).
3309 * * On Windows it is the thread identifier returned by GetThreadId().
3310 * * On other platforms, it raises NotImplementedError.
3311 *
3312 * NOTE:
3313 * If the thread is not associated yet or already deassociated with a native
3314 * thread, it returns _nil_.
3315 * If the Ruby implementation uses M:N thread model, the ID may change
3316 * depending on the timing.
3317 */
3318
3319static VALUE
3320rb_thread_native_thread_id(VALUE thread)
3321{
3322 rb_thread_t *target_th = rb_thread_ptr(thread);
3323 if (rb_threadptr_dead(target_th)) return Qnil;
3324 return native_thread_native_thread_id(target_th);
3325}
3326#else
3327# define rb_thread_native_thread_id rb_f_notimplement
3328#endif
3329
3330/*
3331 * call-seq:
3332 * thr.to_s -> string
3333 *
3334 * Dump the name, id, and status of _thr_ to a string.
3335 */
3336
3337static VALUE
3338rb_thread_to_s(VALUE thread)
3339{
3340 VALUE cname = rb_class_path(rb_obj_class(thread));
3341 rb_thread_t *target_th = rb_thread_ptr(thread);
3342 const char *status;
3343 VALUE str, loc;
3344
3345 status = thread_status_name(target_th, TRUE);
3346 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3347 if (!NIL_P(target_th->name)) {
3348 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3349 }
3350 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3351 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3352 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3353 }
3354 rb_str_catf(str, " %s>", status);
3355
3356 return str;
3357}
3358
3359/* variables for recursive traversals */
3360#define recursive_key id__recursive_key__
3361
3362static VALUE
3363threadptr_local_aref(rb_thread_t *th, ID id)
3364{
3365 if (id == recursive_key) {
3366 return th->ec->local_storage_recursive_hash;
3367 }
3368 else {
3369 VALUE val;
3370 struct rb_id_table *local_storage = th->ec->local_storage;
3371
3372 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3373 return val;
3374 }
3375 else {
3376 return Qnil;
3377 }
3378 }
3379}
3380
3381VALUE
3383{
3384 return threadptr_local_aref(rb_thread_ptr(thread), id);
3385}
3386
3387/*
3388 * call-seq:
3389 * thr[sym] -> obj or nil
3390 *
3391 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3392 * if not explicitly inside a Fiber), using either a symbol or a string name.
3393 * If the specified variable does not exist, returns +nil+.
3394 *
3395 * [
3396 * Thread.new { Thread.current["name"] = "A" },
3397 * Thread.new { Thread.current[:name] = "B" },
3398 * Thread.new { Thread.current["name"] = "C" }
3399 * ].each do |th|
3400 * th.join
3401 * puts "#{th.inspect}: #{th[:name]}"
3402 * end
3403 *
3404 * This will produce:
3405 *
3406 * #<Thread:0x00000002a54220 dead>: A
3407 * #<Thread:0x00000002a541a8 dead>: B
3408 * #<Thread:0x00000002a54130 dead>: C
3409 *
3410 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3411 * This confusion did not exist in Ruby 1.8 because
3412 * fibers are only available since Ruby 1.9.
3413 * Ruby 1.9 chooses that the methods behaves fiber-local to save
3414 * following idiom for dynamic scope.
3415 *
3416 * def meth(newvalue)
3417 * begin
3418 * oldvalue = Thread.current[:name]
3419 * Thread.current[:name] = newvalue
3420 * yield
3421 * ensure
3422 * Thread.current[:name] = oldvalue
3423 * end
3424 * end
3425 *
3426 * The idiom may not work as dynamic scope if the methods are thread-local
3427 * and a given block switches fiber.
3428 *
3429 * f = Fiber.new {
3430 * meth(1) {
3431 * Fiber.yield
3432 * }
3433 * }
3434 * meth(2) {
3435 * f.resume
3436 * }
3437 * f.resume
3438 * p Thread.current[:name]
3439 * #=> nil if fiber-local
3440 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3441 *
3442 * For thread-local variables, please see #thread_variable_get and
3443 * #thread_variable_set.
3444 *
3445 */
3446
3447static VALUE
3448rb_thread_aref(VALUE thread, VALUE key)
3449{
3450 ID id = rb_check_id(&key);
3451 if (!id) return Qnil;
3452 return rb_thread_local_aref(thread, id);
3453}
3454
3455/*
3456 * call-seq:
3457 * thr.fetch(sym) -> obj
3458 * thr.fetch(sym) { } -> obj
3459 * thr.fetch(sym, default) -> obj
3460 *
3461 * Returns a fiber-local for the given key. If the key can't be
3462 * found, there are several options: With no other arguments, it will
3463 * raise a KeyError exception; if <i>default</i> is given, then that
3464 * will be returned; if the optional code block is specified, then
3465 * that will be run and its result returned. See Thread#[] and
3466 * Hash#fetch.
3467 */
3468static VALUE
3469rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3470{
3471 VALUE key, val;
3472 ID id;
3473 rb_thread_t *target_th = rb_thread_ptr(self);
3474 int block_given;
3475
3476 rb_check_arity(argc, 1, 2);
3477 key = argv[0];
3478
3479 block_given = rb_block_given_p();
3480 if (block_given && argc == 2) {
3481 rb_warn("block supersedes default value argument");
3482 }
3483
3484 id = rb_check_id(&key);
3485
3486 if (id == recursive_key) {
3487 return target_th->ec->local_storage_recursive_hash;
3488 }
3489 else if (id && target_th->ec->local_storage &&
3490 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3491 return val;
3492 }
3493 else if (block_given) {
3494 return rb_yield(key);
3495 }
3496 else if (argc == 1) {
3497 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3498 }
3499 else {
3500 return argv[1];
3501 }
3502}
3503
3504static VALUE
3505threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3506{
3507 if (id == recursive_key) {
3508 th->ec->local_storage_recursive_hash = val;
3509 return val;
3510 }
3511 else {
3512 struct rb_id_table *local_storage = th->ec->local_storage;
3513
3514 if (NIL_P(val)) {
3515 if (!local_storage) return Qnil;
3516 rb_id_table_delete(local_storage, id);
3517 return Qnil;
3518 }
3519 else {
3520 if (local_storage == NULL) {
3521 th->ec->local_storage = local_storage = rb_id_table_create(0);
3522 }
3523 rb_id_table_insert(local_storage, id, val);
3524 return val;
3525 }
3526 }
3527}
3528
3529VALUE
3531{
3532 if (OBJ_FROZEN(thread)) {
3533 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3534 }
3535
3536 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3537}
3538
3539/*
3540 * call-seq:
3541 * thr[sym] = obj -> obj
3542 *
3543 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3544 * using either a symbol or a string.
3545 *
3546 * See also Thread#[].
3547 *
3548 * For thread-local variables, please see #thread_variable_set and
3549 * #thread_variable_get.
3550 */
3551
3552static VALUE
3553rb_thread_aset(VALUE self, VALUE id, VALUE val)
3554{
3555 return rb_thread_local_aset(self, rb_to_id(id), val);
3556}
3557
3558/*
3559 * call-seq:
3560 * thr.thread_variable_get(key) -> obj or nil
3561 *
3562 * Returns the value of a thread local variable that has been set. Note that
3563 * these are different than fiber local values. For fiber local values,
3564 * please see Thread#[] and Thread#[]=.
3565 *
3566 * Thread local values are carried along with threads, and do not respect
3567 * fibers. For example:
3568 *
3569 * Thread.new {
3570 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3571 * Thread.current["foo"] = "bar" # set a fiber local
3572 *
3573 * Fiber.new {
3574 * Fiber.yield [
3575 * Thread.current.thread_variable_get("foo"), # get the thread local
3576 * Thread.current["foo"], # get the fiber local
3577 * ]
3578 * }.resume
3579 * }.join.value # => ['bar', nil]
3580 *
3581 * The value "bar" is returned for the thread local, where nil is returned
3582 * for the fiber local. The fiber is executed in the same thread, so the
3583 * thread local values are available.
3584 */
3585
3586static VALUE
3587rb_thread_variable_get(VALUE thread, VALUE key)
3588{
3589 VALUE locals;
3590
3591 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3592 return Qnil;
3593 }
3594 locals = rb_thread_local_storage(thread);
3595 return rb_hash_aref(locals, rb_to_symbol(key));
3596}
3597
3598/*
3599 * call-seq:
3600 * thr.thread_variable_set(key, value)
3601 *
3602 * Sets a thread local with +key+ to +value+. Note that these are local to
3603 * threads, and not to fibers. Please see Thread#thread_variable_get and
3604 * Thread#[] for more information.
3605 */
3606
3607static VALUE
3608rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3609{
3610 VALUE locals;
3611
3612 if (OBJ_FROZEN(thread)) {
3613 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3614 }
3615
3616 locals = rb_thread_local_storage(thread);
3617 return rb_hash_aset(locals, rb_to_symbol(key), val);
3618}
3619
3620/*
3621 * call-seq:
3622 * thr.key?(sym) -> true or false
3623 *
3624 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3625 * variable.
3626 *
3627 * me = Thread.current
3628 * me[:oliver] = "a"
3629 * me.key?(:oliver) #=> true
3630 * me.key?(:stanley) #=> false
3631 */
3632
3633static VALUE
3634rb_thread_key_p(VALUE self, VALUE key)
3635{
3636 VALUE val;
3637 ID id = rb_check_id(&key);
3638 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3639
3640 if (!id || local_storage == NULL) {
3641 return Qfalse;
3642 }
3643 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3644}
3645
3646static enum rb_id_table_iterator_result
3647thread_keys_i(ID key, VALUE value, void *ary)
3648{
3649 rb_ary_push((VALUE)ary, ID2SYM(key));
3650 return ID_TABLE_CONTINUE;
3651}
3652
3653int
3655{
3656 // TODO
3657 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3658}
3659
3660/*
3661 * call-seq:
3662 * thr.keys -> array
3663 *
3664 * Returns an array of the names of the fiber-local variables (as Symbols).
3665 *
3666 * thr = Thread.new do
3667 * Thread.current[:cat] = 'meow'
3668 * Thread.current["dog"] = 'woof'
3669 * end
3670 * thr.join #=> #<Thread:0x401b3f10 dead>
3671 * thr.keys #=> [:dog, :cat]
3672 */
3673
3674static VALUE
3675rb_thread_keys(VALUE self)
3676{
3677 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3678 VALUE ary = rb_ary_new();
3679
3680 if (local_storage) {
3681 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3682 }
3683 return ary;
3684}
3685
3686static int
3687keys_i(VALUE key, VALUE value, VALUE ary)
3688{
3689 rb_ary_push(ary, key);
3690 return ST_CONTINUE;
3691}
3692
3693/*
3694 * call-seq:
3695 * thr.thread_variables -> array
3696 *
3697 * Returns an array of the names of the thread-local variables (as Symbols).
3698 *
3699 * thr = Thread.new do
3700 * Thread.current.thread_variable_set(:cat, 'meow')
3701 * Thread.current.thread_variable_set("dog", 'woof')
3702 * end
3703 * thr.join #=> #<Thread:0x401b3f10 dead>
3704 * thr.thread_variables #=> [:dog, :cat]
3705 *
3706 * Note that these are not fiber local variables. Please see Thread#[] and
3707 * Thread#thread_variable_get for more details.
3708 */
3709
3710static VALUE
3711rb_thread_variables(VALUE thread)
3712{
3713 VALUE locals;
3714 VALUE ary;
3715
3716 ary = rb_ary_new();
3717 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3718 return ary;
3719 }
3720 locals = rb_thread_local_storage(thread);
3721 rb_hash_foreach(locals, keys_i, ary);
3722
3723 return ary;
3724}
3725
3726/*
3727 * call-seq:
3728 * thr.thread_variable?(key) -> true or false
3729 *
3730 * Returns +true+ if the given string (or symbol) exists as a thread-local
3731 * variable.
3732 *
3733 * me = Thread.current
3734 * me.thread_variable_set(:oliver, "a")
3735 * me.thread_variable?(:oliver) #=> true
3736 * me.thread_variable?(:stanley) #=> false
3737 *
3738 * Note that these are not fiber local variables. Please see Thread#[] and
3739 * Thread#thread_variable_get for more details.
3740 */
3741
3742static VALUE
3743rb_thread_variable_p(VALUE thread, VALUE key)
3744{
3745 VALUE locals;
3746
3747 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3748 return Qfalse;
3749 }
3750 locals = rb_thread_local_storage(thread);
3751
3752 return RBOOL(rb_hash_lookup(locals, rb_to_symbol(key)) != Qnil);
3753}
3754
3755/*
3756 * call-seq:
3757 * thr.priority -> integer
3758 *
3759 * Returns the priority of <i>thr</i>. Default is inherited from the
3760 * current thread which creating the new thread, or zero for the
3761 * initial main thread; higher-priority thread will run more frequently
3762 * than lower-priority threads (but lower-priority threads can also run).
3763 *
3764 * This is just hint for Ruby thread scheduler. It may be ignored on some
3765 * platform.
3766 *
3767 * Thread.current.priority #=> 0
3768 */
3769
3770static VALUE
3771rb_thread_priority(VALUE thread)
3772{
3773 return INT2NUM(rb_thread_ptr(thread)->priority);
3774}
3775
3776
3777/*
3778 * call-seq:
3779 * thr.priority= integer -> thr
3780 *
3781 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3782 * will run more frequently than lower-priority threads (but lower-priority
3783 * threads can also run).
3784 *
3785 * This is just hint for Ruby thread scheduler. It may be ignored on some
3786 * platform.
3787 *
3788 * count1 = count2 = 0
3789 * a = Thread.new do
3790 * loop { count1 += 1 }
3791 * end
3792 * a.priority = -1
3793 *
3794 * b = Thread.new do
3795 * loop { count2 += 1 }
3796 * end
3797 * b.priority = -2
3798 * sleep 1 #=> 1
3799 * count1 #=> 622504
3800 * count2 #=> 5832
3801 */
3802
3803static VALUE
3804rb_thread_priority_set(VALUE thread, VALUE prio)
3805{
3806 rb_thread_t *target_th = rb_thread_ptr(thread);
3807 int priority;
3808
3809#if USE_NATIVE_THREAD_PRIORITY
3810 target_th->priority = NUM2INT(prio);
3811 native_thread_apply_priority(th);
3812#else
3813 priority = NUM2INT(prio);
3814 if (priority > RUBY_THREAD_PRIORITY_MAX) {
3815 priority = RUBY_THREAD_PRIORITY_MAX;
3816 }
3817 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3818 priority = RUBY_THREAD_PRIORITY_MIN;
3819 }
3820 target_th->priority = (int8_t)priority;
3821#endif
3822 return INT2NUM(target_th->priority);
3823}
3824
3825/* for IO */
3826
3827#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3828
3829/*
3830 * several Unix platforms support file descriptors bigger than FD_SETSIZE
3831 * in select(2) system call.
3832 *
3833 * - Linux 2.2.12 (?)
3834 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3835 * select(2) documents how to allocate fd_set dynamically.
3836 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3837 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3838 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3839 * select(2) documents how to allocate fd_set dynamically.
3840 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3841 * - Solaris 8 has select_large_fdset
3842 * - Mac OS X 10.7 (Lion)
3843 * select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
3844 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3845 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
3846 *
3847 * When fd_set is not big enough to hold big file descriptors,
3848 * it should be allocated dynamically.
3849 * Note that this assumes fd_set is structured as bitmap.
3850 *
3851 * rb_fd_init allocates the memory.
3852 * rb_fd_term free the memory.
3853 * rb_fd_set may re-allocates bitmap.
3854 *
3855 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
3856 */
3857
3858void
3860{
3861 fds->maxfd = 0;
3862 fds->fdset = ALLOC(fd_set);
3863 FD_ZERO(fds->fdset);
3864}
3865
3866void
3867rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
3868{
3869 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3870
3871 if (size < sizeof(fd_set))
3872 size = sizeof(fd_set);
3873 dst->maxfd = src->maxfd;
3874 dst->fdset = xmalloc(size);
3875 memcpy(dst->fdset, src->fdset, size);
3876}
3877
3878void
3880{
3881 if (fds->fdset) xfree(fds->fdset);
3882 fds->maxfd = 0;
3883 fds->fdset = 0;
3884}
3885
3886void
3888{
3889 if (fds->fdset)
3890 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
3891}
3892
3893static void
3894rb_fd_resize(int n, rb_fdset_t *fds)
3895{
3896 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
3897 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
3898
3899 if (m < sizeof(fd_set)) m = sizeof(fd_set);
3900 if (o < sizeof(fd_set)) o = sizeof(fd_set);
3901
3902 if (m > o) {
3903 fds->fdset = xrealloc(fds->fdset, m);
3904 memset((char *)fds->fdset + o, 0, m - o);
3905 }
3906 if (n >= fds->maxfd) fds->maxfd = n + 1;
3907}
3908
3909void
3910rb_fd_set(int n, rb_fdset_t *fds)
3911{
3912 rb_fd_resize(n, fds);
3913 FD_SET(n, fds->fdset);
3914}
3915
3916void
3917rb_fd_clr(int n, rb_fdset_t *fds)
3918{
3919 if (n >= fds->maxfd) return;
3920 FD_CLR(n, fds->fdset);
3921}
3922
3923int
3924rb_fd_isset(int n, const rb_fdset_t *fds)
3925{
3926 if (n >= fds->maxfd) return 0;
3927 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
3928}
3929
3930void
3931rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
3932{
3933 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
3934
3935 if (size < sizeof(fd_set)) size = sizeof(fd_set);
3936 dst->maxfd = max;
3937 dst->fdset = xrealloc(dst->fdset, size);
3938 memcpy(dst->fdset, src, size);
3939}
3940
3941void
3942rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
3943{
3944 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3945
3946 if (size < sizeof(fd_set))
3947 size = sizeof(fd_set);
3948 dst->maxfd = src->maxfd;
3949 dst->fdset = xrealloc(dst->fdset, size);
3950 memcpy(dst->fdset, src->fdset, size);
3951}
3952
3953int
3954rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
3955{
3956 fd_set *r = NULL, *w = NULL, *e = NULL;
3957 if (readfds) {
3958 rb_fd_resize(n - 1, readfds);
3959 r = rb_fd_ptr(readfds);
3960 }
3961 if (writefds) {
3962 rb_fd_resize(n - 1, writefds);
3963 w = rb_fd_ptr(writefds);
3964 }
3965 if (exceptfds) {
3966 rb_fd_resize(n - 1, exceptfds);
3967 e = rb_fd_ptr(exceptfds);
3968 }
3969 return select(n, r, w, e, timeout);
3970}
3971
3972#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
3973
3974#undef FD_ZERO
3975#undef FD_SET
3976#undef FD_CLR
3977#undef FD_ISSET
3978
3979#define FD_ZERO(f) rb_fd_zero(f)
3980#define FD_SET(i, f) rb_fd_set((i), (f))
3981#define FD_CLR(i, f) rb_fd_clr((i), (f))
3982#define FD_ISSET(i, f) rb_fd_isset((i), (f))
3983
3984#elif defined(_WIN32)
3985
3986void
3988{
3989 set->capa = FD_SETSIZE;
3990 set->fdset = ALLOC(fd_set);
3991 FD_ZERO(set->fdset);
3992}
3993
3994void
3995rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
3996{
3997 rb_fd_init(dst);
3998 rb_fd_dup(dst, src);
3999}
4000
4001void
4003{
4004 xfree(set->fdset);
4005 set->fdset = NULL;
4006 set->capa = 0;
4007}
4008
4009void
4010rb_fd_set(int fd, rb_fdset_t *set)
4011{
4012 unsigned int i;
4013 SOCKET s = rb_w32_get_osfhandle(fd);
4014
4015 for (i = 0; i < set->fdset->fd_count; i++) {
4016 if (set->fdset->fd_array[i] == s) {
4017 return;
4018 }
4019 }
4020 if (set->fdset->fd_count >= (unsigned)set->capa) {
4021 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4022 set->fdset =
4023 rb_xrealloc_mul_add(
4024 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4025 }
4026 set->fdset->fd_array[set->fdset->fd_count++] = s;
4027}
4028
4029#undef FD_ZERO
4030#undef FD_SET
4031#undef FD_CLR
4032#undef FD_ISSET
4033
4034#define FD_ZERO(f) rb_fd_zero(f)
4035#define FD_SET(i, f) rb_fd_set((i), (f))
4036#define FD_CLR(i, f) rb_fd_clr((i), (f))
4037#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4038
4039#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4040
4041#endif
4042
4043#ifndef rb_fd_no_init
4044#define rb_fd_no_init(fds) (void)(fds)
4045#endif
4046
4047static int
4048wait_retryable(int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4049{
4050 if (*result < 0) {
4051 switch (errnum) {
4052 case EINTR:
4053#ifdef ERESTART
4054 case ERESTART:
4055#endif
4056 *result = 0;
4057 if (rel && hrtime_update_expire(rel, end)) {
4058 *rel = 0;
4059 }
4060 return TRUE;
4061 }
4062 return FALSE;
4063 }
4064 else if (*result == 0) {
4065 /* check for spurious wakeup */
4066 if (rel) {
4067 return !hrtime_update_expire(rel, end);
4068 }
4069 return TRUE;
4070 }
4071 return FALSE;
4072}
4073
4075 int max;
4076 int sigwait_fd;
4077 rb_thread_t *th;
4078 rb_fdset_t *rset;
4079 rb_fdset_t *wset;
4080 rb_fdset_t *eset;
4081 rb_fdset_t orig_rset;
4082 rb_fdset_t orig_wset;
4083 rb_fdset_t orig_eset;
4084 struct timeval *timeout;
4085};
4086
4087static VALUE
4088select_set_free(VALUE p)
4089{
4090 struct select_set *set = (struct select_set *)p;
4091
4092 if (set->sigwait_fd >= 0) {
4093 rb_sigwait_fd_put(set->th, set->sigwait_fd);
4094 rb_sigwait_fd_migrate(set->th->vm);
4095 }
4096
4097 rb_fd_term(&set->orig_rset);
4098 rb_fd_term(&set->orig_wset);
4099 rb_fd_term(&set->orig_eset);
4100
4101 return Qfalse;
4102}
4103
4104static const rb_hrtime_t *
4105sigwait_timeout(rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *orig,
4106 int *drained_p)
4107{
4108 static const rb_hrtime_t quantum = TIME_QUANTUM_USEC * 1000;
4109
4110 if (sigwait_fd >= 0 && (!ubf_threads_empty() || BUSY_WAIT_SIGNALS)) {
4111 *drained_p = check_signals_nogvl(th, sigwait_fd);
4112 if (!orig || *orig > quantum)
4113 return &quantum;
4114 }
4115
4116 return orig;
4117}
4118
4119#define sigwait_signals_fd(result, cond, sigwait_fd) \
4120 (result > 0 && (cond) ? (result--, (sigwait_fd)) : -1)
4121
4122static VALUE
4123do_select(VALUE p)
4124{
4125 struct select_set *set = (struct select_set *)p;
4126 int result = 0;
4127 int lerrno;
4128 rb_hrtime_t *to, rel, end = 0;
4129
4130 timeout_prepare(&to, &rel, &end, set->timeout);
4131#define restore_fdset(dst, src) \
4132 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4133#define do_select_update() \
4134 (restore_fdset(set->rset, &set->orig_rset), \
4135 restore_fdset(set->wset, &set->orig_wset), \
4136 restore_fdset(set->eset, &set->orig_eset), \
4137 TRUE)
4138
4139 do {
4140 int drained;
4141 lerrno = 0;
4142
4143 BLOCKING_REGION(set->th, {
4144 const rb_hrtime_t *sto;
4145 struct timeval tv;
4146
4147 sto = sigwait_timeout(set->th, set->sigwait_fd, to, &drained);
4148 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4149 result = native_fd_select(set->max, set->rset, set->wset,
4150 set->eset,
4151 rb_hrtime2timeval(&tv, sto), set->th);
4152 if (result < 0) lerrno = errno;
4153 }
4154 }, set->sigwait_fd >= 0 ? ubf_sigwait : ubf_select, set->th, TRUE);
4155
4156 if (set->sigwait_fd >= 0) {
4157 int fd = sigwait_signals_fd(result,
4158 rb_fd_isset(set->sigwait_fd, set->rset),
4159 set->sigwait_fd);
4160 (void)check_signals_nogvl(set->th, fd);
4161 }
4162
4163 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4164 } while (wait_retryable(&result, lerrno, to, end) && do_select_update());
4165
4166 if (result < 0) {
4167 errno = lerrno;
4168 }
4169
4170 return (VALUE)result;
4171}
4172
4173static rb_fdset_t *
4174init_set_fd(int fd, rb_fdset_t *fds)
4175{
4176 if (fd < 0) {
4177 return 0;
4178 }
4179 rb_fd_init(fds);
4180 rb_fd_set(fd, fds);
4181
4182 return fds;
4183}
4184
4185int
4186rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4187 struct timeval *timeout)
4188{
4189 struct select_set set;
4190
4191 set.th = GET_THREAD();
4192 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4193 set.max = max;
4194 set.rset = read;
4195 set.wset = write;
4196 set.eset = except;
4197 set.timeout = timeout;
4198
4199 if (!set.rset && !set.wset && !set.eset) {
4200 if (!timeout) {
4202 return 0;
4203 }
4204 rb_thread_wait_for(*timeout);
4205 return 0;
4206 }
4207
4208 set.sigwait_fd = rb_sigwait_fd_get(set.th);
4209 if (set.sigwait_fd >= 0) {
4210 if (set.rset)
4211 rb_fd_set(set.sigwait_fd, set.rset);
4212 else
4213 set.rset = init_set_fd(set.sigwait_fd, &set.orig_rset);
4214 if (set.sigwait_fd >= set.max) {
4215 set.max = set.sigwait_fd + 1;
4216 }
4217 }
4218#define fd_init_copy(f) do { \
4219 if (set.f) { \
4220 rb_fd_resize(set.max - 1, set.f); \
4221 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4222 rb_fd_init_copy(&set.orig_##f, set.f); \
4223 } \
4224 } \
4225 else { \
4226 rb_fd_no_init(&set.orig_##f); \
4227 } \
4228 } while (0)
4229 fd_init_copy(rset);
4230 fd_init_copy(wset);
4231 fd_init_copy(eset);
4232#undef fd_init_copy
4233
4234 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4235}
4236
4237#ifdef USE_POLL
4238
4239/* The same with linux kernel. TODO: make platform independent definition. */
4240#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4241#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4242#define POLLEX_SET (POLLPRI)
4243
4244#ifndef POLLERR_SET /* defined for FreeBSD for now */
4245# define POLLERR_SET (0)
4246#endif
4247
4248/*
4249 * returns a mask of events
4250 */
4251int
4252rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4253{
4254 struct pollfd fds[2];
4255 int result = 0;
4256 int drained;
4257 nfds_t nfds;
4259 struct waiting_fd wfd;
4260 int state;
4261 volatile int lerrno;
4262
4263 wfd.th = GET_THREAD();
4264 wfd.fd = fd;
4265
4266 RB_VM_LOCK_ENTER();
4267 {
4268 ccan_list_add(&wfd.th->vm->waiting_fds, &wfd.wfd_node);
4269 }
4270 RB_VM_LOCK_LEAVE();
4271
4272 EC_PUSH_TAG(wfd.th->ec);
4273 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4274 rb_hrtime_t *to, rel, end = 0;
4275 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4276 timeout_prepare(&to, &rel, &end, timeout);
4277 fds[0].fd = fd;
4278 fds[0].events = (short)events;
4279 fds[0].revents = 0;
4280 do {
4281 fds[1].fd = rb_sigwait_fd_get(wfd.th);
4282
4283 if (fds[1].fd >= 0) {
4284 fds[1].events = POLLIN;
4285 fds[1].revents = 0;
4286 nfds = 2;
4287 ubf = ubf_sigwait;
4288 }
4289 else {
4290 nfds = 1;
4291 ubf = ubf_select;
4292 }
4293
4294 lerrno = 0;
4295 BLOCKING_REGION(wfd.th, {
4296 const rb_hrtime_t *sto;
4297 struct timespec ts;
4298
4299 sto = sigwait_timeout(wfd.th, fds[1].fd, to, &drained);
4300 if (!RUBY_VM_INTERRUPTED(wfd.th->ec)) {
4301 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, sto), 0);
4302 if (result < 0) lerrno = errno;
4303 }
4304 }, ubf, wfd.th, TRUE);
4305
4306 if (fds[1].fd >= 0) {
4307 int fd1 = sigwait_signals_fd(result, fds[1].revents, fds[1].fd);
4308 (void)check_signals_nogvl(wfd.th, fd1);
4309 rb_sigwait_fd_put(wfd.th, fds[1].fd);
4310 rb_sigwait_fd_migrate(wfd.th->vm);
4311 }
4312 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4313 } while (wait_retryable(&result, lerrno, to, end));
4314 }
4315 EC_POP_TAG();
4316
4317 RB_VM_LOCK_ENTER();
4318 {
4319 ccan_list_del(&wfd.wfd_node);
4320 }
4321 RB_VM_LOCK_LEAVE();
4322
4323 if (state) {
4324 EC_JUMP_TAG(wfd.th->ec, state);
4325 }
4326
4327 if (result < 0) {
4328 errno = lerrno;
4329 return -1;
4330 }
4331
4332 if (fds[0].revents & POLLNVAL) {
4333 errno = EBADF;
4334 return -1;
4335 }
4336
4337 /*
4338 * POLLIN, POLLOUT have a different meanings from select(2)'s read/write bit.
4339 * Therefore we need to fix it up.
4340 */
4341 result = 0;
4342 if (fds[0].revents & POLLIN_SET)
4343 result |= RB_WAITFD_IN;
4344 if (fds[0].revents & POLLOUT_SET)
4345 result |= RB_WAITFD_OUT;
4346 if (fds[0].revents & POLLEX_SET)
4347 result |= RB_WAITFD_PRI;
4348
4349 /* all requested events are ready if there is an error */
4350 if (fds[0].revents & POLLERR_SET)
4351 result |= events;
4352
4353 return result;
4354}
4355#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4356struct select_args {
4357 union {
4358 int fd;
4359 int error;
4360 } as;
4361 rb_fdset_t *read;
4362 rb_fdset_t *write;
4363 rb_fdset_t *except;
4364 struct waiting_fd wfd;
4365 struct timeval *tv;
4366};
4367
4368static VALUE
4369select_single(VALUE ptr)
4370{
4371 struct select_args *args = (struct select_args *)ptr;
4372 int r;
4373
4374 r = rb_thread_fd_select(args->as.fd + 1,
4375 args->read, args->write, args->except, args->tv);
4376 if (r == -1)
4377 args->as.error = errno;
4378 if (r > 0) {
4379 r = 0;
4380 if (args->read && rb_fd_isset(args->as.fd, args->read))
4381 r |= RB_WAITFD_IN;
4382 if (args->write && rb_fd_isset(args->as.fd, args->write))
4383 r |= RB_WAITFD_OUT;
4384 if (args->except && rb_fd_isset(args->as.fd, args->except))
4385 r |= RB_WAITFD_PRI;
4386 }
4387 return (VALUE)r;
4388}
4389
4390static VALUE
4391select_single_cleanup(VALUE ptr)
4392{
4393 struct select_args *args = (struct select_args *)ptr;
4394
4395 RB_VM_LOCK_ENTER();
4396 {
4397 ccan_list_del(&args->wfd.wfd_node);
4398 }
4399 RB_VM_LOCK_LEAVE();
4400 if (args->read) rb_fd_term(args->read);
4401 if (args->write) rb_fd_term(args->write);
4402 if (args->except) rb_fd_term(args->except);
4403
4404 return (VALUE)-1;
4405}
4406
4407int
4408rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4409{
4410 rb_fdset_t rfds, wfds, efds;
4411 struct select_args args;
4412 int r;
4413 VALUE ptr = (VALUE)&args;
4414
4415 args.as.fd = fd;
4416 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4417 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4418 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4419 args.tv = timeout;
4420 args.wfd.fd = fd;
4421 args.wfd.th = GET_THREAD();
4422
4423 RB_VM_LOCK_ENTER();
4424 {
4425 ccan_list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
4426 }
4427 RB_VM_LOCK_LEAVE();
4428
4429 r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4430 if (r == -1)
4431 errno = args.as.error;
4432
4433 return r;
4434}
4435#endif /* ! USE_POLL */
4436
4437/*
4438 * for GC
4439 */
4440
4441#ifdef USE_CONSERVATIVE_STACK_END
4442void
4443rb_gc_set_stack_end(VALUE **stack_end_p)
4444{
4445 VALUE stack_end;
4446 *stack_end_p = &stack_end;
4447}
4448#endif
4449
4450/*
4451 *
4452 */
4453
4454void
4455rb_threadptr_check_signal(rb_thread_t *mth)
4456{
4457 /* mth must be main_thread */
4458 if (rb_signal_buff_size() > 0) {
4459 /* wakeup main thread */
4460 threadptr_trap_interrupt(mth);
4461 }
4462}
4463
4464static void
4465async_bug_fd(const char *mesg, int errno_arg, int fd)
4466{
4467 char buff[64];
4468 size_t n = strlcpy(buff, mesg, sizeof(buff));
4469 if (n < sizeof(buff)-3) {
4470 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4471 }
4472 rb_async_bug_errno(buff, errno_arg);
4473}
4474
4475/* VM-dependent API is not available for this function */
4476static int
4477consume_communication_pipe(int fd)
4478{
4479#if USE_EVENTFD
4480 uint64_t buff[1];
4481#else
4482 /* buffer can be shared because no one refers to them. */
4483 static char buff[1024];
4484#endif
4485 ssize_t result;
4486 int ret = FALSE; /* for rb_sigwait_sleep */
4487
4488 /*
4489 * disarm UBF_TIMER before we read, because it can become
4490 * re-armed at any time via sighandler and the pipe will refill
4491 * We can disarm it because this thread is now processing signals
4492 * and we do not want unnecessary SIGVTALRM
4493 */
4494 ubf_timer_disarm();
4495
4496 while (1) {
4497 result = read(fd, buff, sizeof(buff));
4498 if (result > 0) {
4499 ret = TRUE;
4500 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4501 return ret;
4502 }
4503 }
4504 else if (result == 0) {
4505 return ret;
4506 }
4507 else if (result < 0) {
4508 int e = errno;
4509 switch (e) {
4510 case EINTR:
4511 continue; /* retry */
4512 case EAGAIN:
4513#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4514 case EWOULDBLOCK:
4515#endif
4516 return ret;
4517 default:
4518 async_bug_fd("consume_communication_pipe: read", e, fd);
4519 }
4520 }
4521 }
4522}
4523
4524static int
4525check_signals_nogvl(rb_thread_t *th, int sigwait_fd)
4526{
4527 rb_vm_t *vm = GET_VM(); /* th may be 0 */
4528 int ret = sigwait_fd >= 0 ? consume_communication_pipe(sigwait_fd) : FALSE;
4529 ubf_wakeup_all_threads();
4530 ruby_sigchld_handler(vm);
4531 if (rb_signal_buff_size()) {
4532 if (th == vm->ractor.main_thread) {
4533 /* no need to lock + wakeup if already in main thread */
4534 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
4535 }
4536 else {
4537 threadptr_trap_interrupt(vm->ractor.main_thread);
4538 }
4539 ret = TRUE; /* for SIGCHLD_LOSSY && rb_sigwait_sleep */
4540 }
4541 return ret;
4542}
4543
4544void
4545rb_thread_stop_timer_thread(void)
4546{
4547 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4548 native_reset_timer_thread();
4549 }
4550}
4551
4552void
4553rb_thread_reset_timer_thread(void)
4554{
4555 native_reset_timer_thread();
4556}
4557
4558void
4559rb_thread_start_timer_thread(void)
4560{
4561 system_working = 1;
4562 rb_thread_create_timer_thread();
4563}
4564
4565static int
4566clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4567{
4568 int i;
4569 VALUE coverage = (VALUE)val;
4570 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4571 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4572
4573 if (lines) {
4574 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4575 rb_ary_clear(lines);
4576 }
4577 else {
4578 int i;
4579 for (i = 0; i < RARRAY_LEN(lines); i++) {
4580 if (RARRAY_AREF(lines, i) != Qnil)
4581 RARRAY_ASET(lines, i, INT2FIX(0));
4582 }
4583 }
4584 }
4585 if (branches) {
4586 VALUE counters = RARRAY_AREF(branches, 1);
4587 for (i = 0; i < RARRAY_LEN(counters); i++) {
4588 RARRAY_ASET(counters, i, INT2FIX(0));
4589 }
4590 }
4591
4592 return ST_CONTINUE;
4593}
4594
4595void
4596rb_clear_coverages(void)
4597{
4598 VALUE coverages = rb_get_coverages();
4599 if (RTEST(coverages)) {
4600 rb_hash_foreach(coverages, clear_coverage_i, 0);
4601 }
4602}
4603
4604#if defined(HAVE_WORKING_FORK)
4605
4606static void
4607rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4608{
4609 rb_thread_t *i = 0;
4610 rb_vm_t *vm = th->vm;
4611 rb_ractor_t *r = th->ractor;
4612 vm->ractor.main_ractor = r;
4613 vm->ractor.main_thread = th;
4614 r->threads.main = th;
4615 r->status_ = ractor_created;
4616
4617 thread_sched_atfork(TH_SCHED(th));
4618 ubf_list_atfork();
4619
4620 // OK. Only this thread accesses:
4621 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4622 ccan_list_for_each(&r->threads.set, i, lt_node) {
4623 atfork(i, th);
4624 }
4625 }
4626 rb_vm_living_threads_init(vm);
4627
4628 rb_ractor_atfork(vm, th);
4629
4630 /* may be held by MJIT threads in parent */
4631 rb_native_mutex_initialize(&vm->waitpid_lock);
4632 rb_native_mutex_initialize(&vm->workqueue_lock);
4633
4634 /* may be held by any thread in parent */
4635 rb_native_mutex_initialize(&th->interrupt_lock);
4636
4637 vm->fork_gen++;
4638 rb_ractor_sleeper_threads_clear(th->ractor);
4639 rb_clear_coverages();
4640
4641 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4642 VM_ASSERT(vm->ractor.cnt == 1);
4643}
4644
4645static void
4646terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4647{
4648 if (th != current_th) {
4649 rb_mutex_abandon_keeping_mutexes(th);
4650 rb_mutex_abandon_locking_mutex(th);
4651 thread_cleanup_func(th, TRUE);
4652 }
4653}
4654
4655void rb_fiber_atfork(rb_thread_t *);
4656void
4657rb_thread_atfork(void)
4658{
4659 rb_thread_t *th = GET_THREAD();
4660 rb_thread_atfork_internal(th, terminate_atfork_i);
4661 th->join_list = NULL;
4662 rb_fiber_atfork(th);
4663
4664 /* We don't want reproduce CVE-2003-0900. */
4666
4667 /* For child, starting MJIT worker thread in this place which is safer than immediately after `after_fork_ruby`. */
4668 mjit_child_after_fork();
4669}
4670
4671static void
4672terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4673{
4674 if (th != current_th) {
4675 thread_cleanup_func_before_exec(th);
4676 }
4677}
4678
4679void
4681{
4682 rb_thread_t *th = GET_THREAD();
4683 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4684}
4685#else
4686void
4688{
4689}
4690
4691void
4695#endif
4696
4697struct thgroup {
4698 int enclosed;
4699 VALUE group;
4700};
4701
4702static size_t
4703thgroup_memsize(const void *ptr)
4704{
4705 return sizeof(struct thgroup);
4706}
4707
4708static const rb_data_type_t thgroup_data_type = {
4709 "thgroup",
4710 {0, RUBY_TYPED_DEFAULT_FREE, thgroup_memsize,},
4711 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4712};
4713
4714/*
4715 * Document-class: ThreadGroup
4716 *
4717 * ThreadGroup provides a means of keeping track of a number of threads as a
4718 * group.
4719 *
4720 * A given Thread object can only belong to one ThreadGroup at a time; adding
4721 * a thread to a new group will remove it from any previous group.
4722 *
4723 * Newly created threads belong to the same group as the thread from which they
4724 * were created.
4725 */
4726
4727/*
4728 * Document-const: Default
4729 *
4730 * The default ThreadGroup created when Ruby starts; all Threads belong to it
4731 * by default.
4732 */
4733static VALUE
4734thgroup_s_alloc(VALUE klass)
4735{
4736 VALUE group;
4737 struct thgroup *data;
4738
4739 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4740 data->enclosed = 0;
4741 data->group = group;
4742
4743 return group;
4744}
4745
4746/*
4747 * call-seq:
4748 * thgrp.list -> array
4749 *
4750 * Returns an array of all existing Thread objects that belong to this group.
4751 *
4752 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4753 */
4754
4755static VALUE
4756thgroup_list(VALUE group)
4757{
4758 VALUE ary = rb_ary_new();
4759 rb_thread_t *th = 0;
4760 rb_ractor_t *r = GET_RACTOR();
4761
4762 ccan_list_for_each(&r->threads.set, th, lt_node) {
4763 if (th->thgroup == group) {
4764 rb_ary_push(ary, th->self);
4765 }
4766 }
4767 return ary;
4768}
4769
4770
4771/*
4772 * call-seq:
4773 * thgrp.enclose -> thgrp
4774 *
4775 * Prevents threads from being added to or removed from the receiving
4776 * ThreadGroup.
4777 *
4778 * New threads can still be started in an enclosed ThreadGroup.
4779 *
4780 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4781 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4782 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4783 * tg.add thr
4784 * #=> ThreadError: can't move from the enclosed thread group
4785 */
4786
4787static VALUE
4788thgroup_enclose(VALUE group)
4789{
4790 struct thgroup *data;
4791
4792 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4793 data->enclosed = 1;
4794
4795 return group;
4796}
4797
4798
4799/*
4800 * call-seq:
4801 * thgrp.enclosed? -> true or false
4802 *
4803 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4804 */
4805
4806static VALUE
4807thgroup_enclosed_p(VALUE group)
4808{
4809 struct thgroup *data;
4810
4811 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4812 return RBOOL(data->enclosed);
4813}
4814
4815
4816/*
4817 * call-seq:
4818 * thgrp.add(thread) -> thgrp
4819 *
4820 * Adds the given +thread+ to this group, removing it from any other
4821 * group to which it may have previously been a member.
4822 *
4823 * puts "Initial group is #{ThreadGroup::Default.list}"
4824 * tg = ThreadGroup.new
4825 * t1 = Thread.new { sleep }
4826 * t2 = Thread.new { sleep }
4827 * puts "t1 is #{t1}"
4828 * puts "t2 is #{t2}"
4829 * tg.add(t1)
4830 * puts "Initial group now #{ThreadGroup::Default.list}"
4831 * puts "tg group now #{tg.list}"
4832 *
4833 * This will produce:
4834 *
4835 * Initial group is #<Thread:0x401bdf4c>
4836 * t1 is #<Thread:0x401b3c90>
4837 * t2 is #<Thread:0x401b3c18>
4838 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4839 * tg group now #<Thread:0x401b3c90>
4840 */
4841
4842static VALUE
4843thgroup_add(VALUE group, VALUE thread)
4844{
4845 rb_thread_t *target_th = rb_thread_ptr(thread);
4846 struct thgroup *data;
4847
4848 if (OBJ_FROZEN(group)) {
4849 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4850 }
4851 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4852 if (data->enclosed) {
4853 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4854 }
4855
4856 if (OBJ_FROZEN(target_th->thgroup)) {
4857 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4858 }
4859 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4860 if (data->enclosed) {
4862 "can't move from the enclosed thread group");
4863 }
4864
4865 target_th->thgroup = group;
4866 return group;
4867}
4868
4869/*
4870 * Document-class: ThreadShield
4871 */
4872static void
4873thread_shield_mark(void *ptr)
4874{
4875 rb_gc_mark((VALUE)ptr);
4876}
4877
4878static const rb_data_type_t thread_shield_data_type = {
4879 "thread_shield",
4880 {thread_shield_mark, 0, 0,},
4881 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4882};
4883
4884static VALUE
4885thread_shield_alloc(VALUE klass)
4886{
4887 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4888}
4889
4890#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4891#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
4892#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4893#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
4894STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
4895static inline unsigned int
4896rb_thread_shield_waiting(VALUE b)
4897{
4898 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
4899}
4900
4901static inline void
4902rb_thread_shield_waiting_inc(VALUE b)
4903{
4904 unsigned int w = rb_thread_shield_waiting(b);
4905 w++;
4906 if (w > THREAD_SHIELD_WAITING_MAX)
4907 rb_raise(rb_eRuntimeError, "waiting count overflow");
4908 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4909 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4910}
4911
4912static inline void
4913rb_thread_shield_waiting_dec(VALUE b)
4914{
4915 unsigned int w = rb_thread_shield_waiting(b);
4916 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
4917 w--;
4918 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4919 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4920}
4921
4922VALUE
4923rb_thread_shield_new(void)
4924{
4925 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4926 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4927 return thread_shield;
4928}
4929
4930bool
4931rb_thread_shield_owned(VALUE self)
4932{
4933 VALUE mutex = GetThreadShieldPtr(self);
4934 if (!mutex) return false;
4935
4936 rb_mutex_t *m = mutex_ptr(mutex);
4937
4938 return m->fiber == GET_EC()->fiber_ptr;
4939}
4940
4941/*
4942 * Wait a thread shield.
4943 *
4944 * Returns
4945 * true: acquired the thread shield
4946 * false: the thread shield was destroyed and no other threads waiting
4947 * nil: the thread shield was destroyed but still in use
4948 */
4949VALUE
4950rb_thread_shield_wait(VALUE self)
4951{
4952 VALUE mutex = GetThreadShieldPtr(self);
4953 rb_mutex_t *m;
4954
4955 if (!mutex) return Qfalse;
4956 m = mutex_ptr(mutex);
4957 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
4958 rb_thread_shield_waiting_inc(self);
4959 rb_mutex_lock(mutex);
4960 rb_thread_shield_waiting_dec(self);
4961 if (DATA_PTR(self)) return Qtrue;
4962 rb_mutex_unlock(mutex);
4963 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
4964}
4965
4966static VALUE
4967thread_shield_get_mutex(VALUE self)
4968{
4969 VALUE mutex = GetThreadShieldPtr(self);
4970 if (!mutex)
4971 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
4972 return mutex;
4973}
4974
4975/*
4976 * Release a thread shield, and return true if it has waiting threads.
4977 */
4978VALUE
4979rb_thread_shield_release(VALUE self)
4980{
4981 VALUE mutex = thread_shield_get_mutex(self);
4982 rb_mutex_unlock(mutex);
4983 return RBOOL(rb_thread_shield_waiting(self) > 0);
4984}
4985
4986/*
4987 * Release and destroy a thread shield, and return true if it has waiting threads.
4988 */
4989VALUE
4990rb_thread_shield_destroy(VALUE self)
4991{
4992 VALUE mutex = thread_shield_get_mutex(self);
4993 DATA_PTR(self) = 0;
4994 rb_mutex_unlock(mutex);
4995 return RBOOL(rb_thread_shield_waiting(self) > 0);
4996}
4997
4998static VALUE
4999threadptr_recursive_hash(rb_thread_t *th)
5000{
5001 return th->ec->local_storage_recursive_hash;
5002}
5003
5004static void
5005threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5006{
5007 th->ec->local_storage_recursive_hash = hash;
5008}
5009
5011
5012/*
5013 * Returns the current "recursive list" used to detect recursion.
5014 * This list is a hash table, unique for the current thread and for
5015 * the current __callee__.
5016 */
5017
5018static VALUE
5019recursive_list_access(VALUE sym)
5020{
5021 rb_thread_t *th = GET_THREAD();
5022 VALUE hash = threadptr_recursive_hash(th);
5023 VALUE list;
5024 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5025 hash = rb_ident_hash_new();
5026 threadptr_recursive_hash_set(th, hash);
5027 list = Qnil;
5028 }
5029 else {
5030 list = rb_hash_aref(hash, sym);
5031 }
5032 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5033 list = rb_ident_hash_new();
5034 rb_hash_aset(hash, sym, list);
5035 }
5036 return list;
5037}
5038
5039/*
5040 * Returns Qtrue if and only if obj (or the pair <obj, paired_obj>) is already
5041 * in the recursion list.
5042 * Assumes the recursion list is valid.
5043 */
5044
5045static VALUE
5046recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5047{
5048#if SIZEOF_LONG == SIZEOF_VOIDP
5049 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5050#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5051 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5052 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5053#endif
5054
5055 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5056 if (UNDEF_P(pair_list))
5057 return Qfalse;
5058 if (paired_obj_id) {
5059 if (!RB_TYPE_P(pair_list, T_HASH)) {
5060 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5061 return Qfalse;
5062 }
5063 else {
5064 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5065 return Qfalse;
5066 }
5067 }
5068 return Qtrue;
5069}
5070
5071/*
5072 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5073 * For a single obj, it sets list[obj] to Qtrue.
5074 * For a pair, it sets list[obj] to paired_obj_id if possible,
5075 * otherwise list[obj] becomes a hash like:
5076 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5077 * Assumes the recursion list is valid.
5078 */
5079
5080static void
5081recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5082{
5083 VALUE pair_list;
5084
5085 if (!paired_obj) {
5086 rb_hash_aset(list, obj, Qtrue);
5087 }
5088 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5089 rb_hash_aset(list, obj, paired_obj);
5090 }
5091 else {
5092 if (!RB_TYPE_P(pair_list, T_HASH)){
5093 VALUE other_paired_obj = pair_list;
5094 pair_list = rb_hash_new();
5095 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5096 rb_hash_aset(list, obj, pair_list);
5097 }
5098 rb_hash_aset(pair_list, paired_obj, Qtrue);
5099 }
5100}
5101
5102/*
5103 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5104 * For a pair, if list[obj] is a hash, then paired_obj_id is
5105 * removed from the hash and no attempt is made to simplify
5106 * list[obj] from {only_one_paired_id => true} to only_one_paired_id
5107 * Assumes the recursion list is valid.
5108 */
5109
5110static int
5111recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5112{
5113 if (paired_obj) {
5114 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5115 if (UNDEF_P(pair_list)) {
5116 return 0;
5117 }
5118 if (RB_TYPE_P(pair_list, T_HASH)) {
5119 rb_hash_delete_entry(pair_list, paired_obj);
5120 if (!RHASH_EMPTY_P(pair_list)) {
5121 return 1; /* keep hash until is empty */
5122 }
5123 }
5124 }
5125 rb_hash_delete_entry(list, obj);
5126 return 1;
5127}
5128
5130 VALUE (*func) (VALUE, VALUE, int);
5131 VALUE list;
5132 VALUE obj;
5133 VALUE pairid;
5134 VALUE arg;
5135};
5136
5137static VALUE
5138exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5139{
5140 struct exec_recursive_params *p = (void *)data;
5141 return (*p->func)(p->obj, p->arg, FALSE);
5142}
5143
5144/*
5145 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5146 * current method is called recursively on obj, or on the pair <obj, pairid>
5147 * If outer is 0, then the innermost func will be called with recursive set
5148 * to Qtrue, otherwise the outermost func will be called. In the latter case,
5149 * all inner func are short-circuited by throw.
5150 * Implementation details: the value thrown is the recursive list which is
5151 * proper to the current method and unlikely to be caught anywhere else.
5152 * list[recursive_key] is used as a flag for the outermost call.
5153 */
5154
5155static VALUE
5156exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5157{
5158 VALUE result = Qundef;
5159 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5160 struct exec_recursive_params p;
5161 int outermost;
5162 p.list = recursive_list_access(sym);
5163 p.obj = obj;
5164 p.pairid = pairid;
5165 p.arg = arg;
5166 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5167
5168 if (recursive_check(p.list, p.obj, pairid)) {
5169 if (outer && !outermost) {
5170 rb_throw_obj(p.list, p.list);
5171 }
5172 return (*func)(obj, arg, TRUE);
5173 }
5174 else {
5175 enum ruby_tag_type state;
5176
5177 p.func = func;
5178
5179 if (outermost) {
5180 recursive_push(p.list, ID2SYM(recursive_key), 0);
5181 recursive_push(p.list, p.obj, p.pairid);
5182 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5183 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5184 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5185 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5186 if (result == p.list) {
5187 result = (*func)(obj, arg, TRUE);
5188 }
5189 }
5190 else {
5191 volatile VALUE ret = Qundef;
5192 recursive_push(p.list, p.obj, p.pairid);
5193 EC_PUSH_TAG(GET_EC());
5194 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5195 ret = (*func)(obj, arg, FALSE);
5196 }
5197 EC_POP_TAG();
5198 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5199 goto invalid;
5200 }
5201 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5202 result = ret;
5203 }
5204 }
5205 *(volatile struct exec_recursive_params *)&p;
5206 return result;
5207
5208 invalid:
5209 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5210 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5211 sym, rb_thread_current());
5213}
5214
5215/*
5216 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5217 * current method is called recursively on obj
5218 */
5219
5220VALUE
5221rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5222{
5223 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5224}
5225
5226/*
5227 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5228 * current method is called recursively on the ordered pair <obj, paired_obj>
5229 */
5230
5231VALUE
5232rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5233{
5234 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5235}
5236
5237/*
5238 * If recursion is detected on the current method and obj, the outermost
5239 * func will be called with (obj, arg, Qtrue). All inner func will be
5240 * short-circuited using throw.
5241 */
5242
5243VALUE
5244rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5245{
5246 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5247}
5248
5249VALUE
5250rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5251{
5252 return exec_recursive(func, obj, 0, arg, 1, mid);
5253}
5254
5255/*
5256 * If recursion is detected on the current method, obj and paired_obj,
5257 * the outermost func will be called with (obj, arg, Qtrue). All inner
5258 * func will be short-circuited using throw.
5259 */
5260
5261VALUE
5262rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5263{
5264 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5265}
5266
5267/*
5268 * call-seq:
5269 * thread.backtrace -> array or nil
5270 *
5271 * Returns the current backtrace of the target thread.
5272 *
5273 */
5274
5275static VALUE
5276rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5277{
5278 return rb_vm_thread_backtrace(argc, argv, thval);
5279}
5280
5281/* call-seq:
5282 * thread.backtrace_locations(*args) -> array or nil
5283 *
5284 * Returns the execution stack for the target thread---an array containing
5285 * backtrace location objects.
5286 *
5287 * See Thread::Backtrace::Location for more information.
5288 *
5289 * This method behaves similarly to Kernel#caller_locations except it applies
5290 * to a specific thread.
5291 */
5292static VALUE
5293rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5294{
5295 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5296}
5297
5298void
5299Init_Thread_Mutex(void)
5300{
5301 rb_thread_t *th = GET_THREAD();
5302
5303 rb_native_mutex_initialize(&th->vm->waitpid_lock);
5304 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5305 rb_native_mutex_initialize(&th->interrupt_lock);
5306}
5307
5308/*
5309 * Document-class: ThreadError
5310 *
5311 * Raised when an invalid operation is attempted on a thread.
5312 *
5313 * For example, when no other thread has been started:
5314 *
5315 * Thread.stop
5316 *
5317 * This will raises the following exception:
5318 *
5319 * ThreadError: stopping only thread
5320 * note: use sleep to stop forever
5321 */
5322
5323void
5324Init_Thread(void)
5325{
5326 VALUE cThGroup;
5327 rb_thread_t *th = GET_THREAD();
5328
5329 sym_never = ID2SYM(rb_intern_const("never"));
5330 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5331 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5332
5333 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5334 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5335 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5336 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5337 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5338 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5339 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5340 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5341 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5342 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5343 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5344 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5345 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5346 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5347 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5348 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5349 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5350 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5351 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5352
5353 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5354 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5355 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5356 rb_define_method(rb_cThread, "value", thread_value, 0);
5357 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5358 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5359 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5360 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5361 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5362 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5363 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5364 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5365 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5366 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5367 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5368 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5369 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5370 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5371 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5372 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5373 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5374 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5375 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5376 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5377 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5378 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5379 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5380 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5381 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5382 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5383
5384 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5385 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5386 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5387 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5388 rb_define_alias(rb_cThread, "inspect", "to_s");
5389
5390 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5391 "stream closed in another thread");
5392
5393 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5394 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5395 rb_define_method(cThGroup, "list", thgroup_list, 0);
5396 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5397 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5398 rb_define_method(cThGroup, "add", thgroup_add, 1);
5399
5400 {
5401 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5402 rb_define_const(cThGroup, "Default", th->thgroup);
5403 }
5404
5406
5407 /* init thread core */
5408 {
5409 /* main thread setting */
5410 {
5411 /* acquire global vm lock */
5412 struct rb_thread_sched *sched = TH_SCHED(th);
5413 thread_sched_to_running(sched, th);
5414
5415 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5416 th->pending_interrupt_queue_checked = 0;
5417 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5418 }
5419 }
5420
5421 rb_thread_create_timer_thread();
5422
5423 Init_thread_sync();
5424}
5425
5426int
5428{
5429 rb_thread_t *th = ruby_thread_from_native();
5430
5431 return th != 0;
5432}
5433
5434#ifdef NON_SCALAR_THREAD_ID
5435 #define thread_id_str(th) (NULL)
5436#else
5437 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5438#endif
5439
5440static void
5441debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5442{
5443 rb_thread_t *th = 0;
5444 VALUE sep = rb_str_new_cstr("\n ");
5445
5446 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5447 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5448 (void *)GET_THREAD(), (void *)r->threads.main);
5449
5450 ccan_list_for_each(&r->threads.set, th, lt_node) {
5451 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5452 "native:%p int:%u",
5453 th->self, (void *)th, thread_id_str(th), th->ec->interrupt_flag);
5454
5455 if (th->locking_mutex) {
5456 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5457 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5458 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5459 }
5460
5461 {
5462 struct rb_waiting_list *list = th->join_list;
5463 while (list) {
5464 rb_str_catf(msg, "\n depended by: tb_thread_id:%p", (void *)list->thread);
5465 list = list->next;
5466 }
5467 }
5468 rb_str_catf(msg, "\n ");
5469 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
5470 rb_str_catf(msg, "\n");
5471 }
5472}
5473
5474static void
5475rb_check_deadlock(rb_ractor_t *r)
5476{
5477 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5478
5479 int found = 0;
5480 rb_thread_t *th = NULL;
5481 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5482 int ltnum = rb_ractor_living_thread_num(r);
5483
5484 if (ltnum > sleeper_num) return;
5485 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5486 if (patrol_thread && patrol_thread != GET_THREAD()) return;
5487
5488 ccan_list_for_each(&r->threads.set, th, lt_node) {
5489 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5490 found = 1;
5491 }
5492 else if (th->locking_mutex) {
5493 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5494 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5495 found = 1;
5496 }
5497 }
5498 if (found)
5499 break;
5500 }
5501
5502 if (!found) {
5503 VALUE argv[2];
5504 argv[0] = rb_eFatal;
5505 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5506 debug_deadlock_check(r, argv[1]);
5507 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5508 rb_threadptr_raise(r->threads.main, 2, argv);
5509 }
5510}
5511
5512// Used for VM memsize reporting. Returns the size of a list of waiting_fd
5513// structs. Defined here because the struct definition lives here as well.
5514size_t
5515rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
5516{
5517 struct waiting_fd *waitfd = 0;
5518 size_t size = 0;
5519
5520 ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
5521 size += sizeof(struct waiting_fd);
5522 }
5523
5524 return size;
5525}
5526
5527static void
5528update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5529{
5530 const rb_control_frame_t *cfp = GET_EC()->cfp;
5531 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5532 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5533 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5534 if (lines) {
5535 long line = rb_sourceline() - 1;
5536 long count;
5537 VALUE num;
5538 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5539 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5540 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5541 rb_ary_push(lines, LONG2FIX(line + 1));
5542 return;
5543 }
5544 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5545 return;
5546 }
5547 num = RARRAY_AREF(lines, line);
5548 if (!FIXNUM_P(num)) return;
5549 count = FIX2LONG(num) + 1;
5550 if (POSFIXABLE(count)) {
5551 RARRAY_ASET(lines, line, LONG2FIX(count));
5552 }
5553 }
5554 }
5555}
5556
5557static void
5558update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5559{
5560 const rb_control_frame_t *cfp = GET_EC()->cfp;
5561 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5562 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5563 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5564 if (branches) {
5565 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5566 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5567 VALUE counters = RARRAY_AREF(branches, 1);
5568 VALUE num = RARRAY_AREF(counters, idx);
5569 count = FIX2LONG(num) + 1;
5570 if (POSFIXABLE(count)) {
5571 RARRAY_ASET(counters, idx, LONG2FIX(count));
5572 }
5573 }
5574 }
5575}
5576
5577const rb_method_entry_t *
5578rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5579{
5580 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5581
5582 if (!me->def) return NULL; // negative cme
5583
5584 retry:
5585 switch (me->def->type) {
5586 case VM_METHOD_TYPE_ISEQ: {
5587 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5588 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5589 path = rb_iseq_path(iseq);
5590 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5591 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5592 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5593 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5594 break;
5595 }
5596 case VM_METHOD_TYPE_BMETHOD: {
5597 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5598 if (iseq) {
5599 rb_iseq_location_t *loc;
5600 rb_iseq_check(iseq);
5601 path = rb_iseq_path(iseq);
5602 loc = &ISEQ_BODY(iseq)->location;
5603 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5604 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5605 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5606 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5607 break;
5608 }
5609 return NULL;
5610 }
5611 case VM_METHOD_TYPE_ALIAS:
5612 me = me->def->body.alias.original_me;
5613 goto retry;
5614 case VM_METHOD_TYPE_REFINED:
5615 me = me->def->body.refined.orig_me;
5616 if (!me) return NULL;
5617 goto retry;
5618 default:
5619 return NULL;
5620 }
5621
5622 /* found */
5623 if (RB_TYPE_P(path, T_ARRAY)) {
5624 path = rb_ary_entry(path, 1);
5625 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
5626 }
5627 if (resolved_location) {
5628 resolved_location[0] = path;
5629 resolved_location[1] = beg_pos_lineno;
5630 resolved_location[2] = beg_pos_column;
5631 resolved_location[3] = end_pos_lineno;
5632 resolved_location[4] = end_pos_column;
5633 }
5634 return me;
5635}
5636
5637static void
5638update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5639{
5640 const rb_control_frame_t *cfp = GET_EC()->cfp;
5641 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5642 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5643 VALUE rcount;
5644 long count;
5645
5646 me = rb_resolve_me_location(me, 0);
5647 if (!me) return;
5648
5649 rcount = rb_hash_aref(me2counter, (VALUE) me);
5650 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5651 if (POSFIXABLE(count)) {
5652 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5653 }
5654}
5655
5656VALUE
5657rb_get_coverages(void)
5658{
5659 return GET_VM()->coverages;
5660}
5661
5662int
5663rb_get_coverage_mode(void)
5664{
5665 return GET_VM()->coverage_mode;
5666}
5667
5668void
5669rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5670{
5671 GET_VM()->coverages = coverages;
5672 GET_VM()->me2counter = me2counter;
5673 GET_VM()->coverage_mode = mode;
5674}
5675
5676void
5677rb_resume_coverages(void)
5678{
5679 int mode = GET_VM()->coverage_mode;
5680 VALUE me2counter = GET_VM()->me2counter;
5681 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5682 if (mode & COVERAGE_TARGET_BRANCHES) {
5683 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5684 }
5685 if (mode & COVERAGE_TARGET_METHODS) {
5686 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5687 }
5688}
5689
5690void
5691rb_suspend_coverages(void)
5692{
5693 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5694 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5695 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5696 }
5697 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5698 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5699 }
5700}
5701
5702/* Make coverage arrays empty so old covered files are no longer tracked. */
5703void
5704rb_reset_coverages(void)
5705{
5706 rb_clear_coverages();
5707 rb_iseq_remove_coverage_all();
5708 GET_VM()->coverages = Qfalse;
5709}
5710
5711VALUE
5712rb_default_coverage(int n)
5713{
5714 VALUE coverage = rb_ary_hidden_new_fill(3);
5715 VALUE lines = Qfalse, branches = Qfalse;
5716 int mode = GET_VM()->coverage_mode;
5717
5718 if (mode & COVERAGE_TARGET_LINES) {
5719 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
5720 }
5721 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5722
5723 if (mode & COVERAGE_TARGET_BRANCHES) {
5724 branches = rb_ary_hidden_new_fill(2);
5725 /* internal data structures for branch coverage:
5726 *
5727 * { branch base node =>
5728 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
5729 * branch target id =>
5730 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
5731 * ...
5732 * }],
5733 * ...
5734 * }
5735 *
5736 * Example:
5737 * { NODE_CASE =>
5738 * [1, 0, 4, 3, {
5739 * NODE_WHEN => [2, 8, 2, 9, 0],
5740 * NODE_WHEN => [3, 8, 3, 9, 1],
5741 * ...
5742 * }],
5743 * ...
5744 * }
5745 */
5746 VALUE structure = rb_hash_new();
5747 rb_obj_hide(structure);
5748 RARRAY_ASET(branches, 0, structure);
5749 /* branch execution counters */
5750 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
5751 }
5752 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5753
5754 return coverage;
5755}
5756
5757static VALUE
5758uninterruptible_exit(VALUE v)
5759{
5760 rb_thread_t *cur_th = GET_THREAD();
5761 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
5762
5763 cur_th->pending_interrupt_queue_checked = 0;
5764 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
5765 RUBY_VM_SET_INTERRUPT(cur_th->ec);
5766 }
5767 return Qnil;
5768}
5769
5770VALUE
5771rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
5772{
5773 VALUE interrupt_mask = rb_ident_hash_new();
5774 rb_thread_t *cur_th = GET_THREAD();
5775
5776 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5777 OBJ_FREEZE_RAW(interrupt_mask);
5778 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5779
5780 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
5781
5782 RUBY_VM_CHECK_INTS(cur_th->ec);
5783 return ret;
5784}
#define RUBY_ASSERT_ALWAYS(expr)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
Definition assert.h:167
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
#define RUBY_INTERNAL_EVENT_SWITCH
Thread switched.
Definition event.h:85
int rb_remove_event_hook(rb_event_hook_func_t func)
Removes the passed function from the list of event hooks.
Definition vm_trace.c:293
#define RUBY_EVENT_THREAD_BEGIN
Encountered a new thread.
Definition event.h:53
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Type of event hooks.
Definition event.h:115
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:103
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
Definition event.h:37
#define RUBY_EVENT_THREAD_END
Encountered an end of a thread.
Definition event.h:54
static void RB_FL_SET_RAW(VALUE obj, VALUE flags)
This is an implenentation detail of RB_FL_SET().
Definition fl_type.h:638
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition class.c:923
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
Definition class.c:2284
ID rb_frame_last_func(void)
Returns the ID of the last method in the call stack.
Definition eval.c:1098
int rb_keyword_given_p(void)
Determines if the current method is given a keyword argument.
Definition eval.c:881
int rb_block_given_p(void)
Determines if the current method is given a block.
Definition eval.c:868
#define rb_str_new2
Old name of rb_str_new_cstr.
Definition string.h:1675
#define ALLOC
Old name of RB_ALLOC.
Definition memory.h:394
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define OBJ_FROZEN
Old name of RB_OBJ_FROZEN.
Definition fl_type.h:145
#define xrealloc
Old name of ruby_xrealloc.
Definition xmalloc.h:56
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define OBJ_FREEZE_RAW
Old name of RB_OBJ_FREEZE_RAW.
Definition fl_type.h:144
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
Definition assume.h:29
#define CLASS_OF
Old name of rb_class_of.
Definition globals.h:203
#define xmalloc
Old name of ruby_xmalloc.
Definition xmalloc.h:53
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FIX2INT
Old name of RB_FIX2INT.
Definition int.h:41
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition value_type.h:66
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define Qtrue
Old name of RUBY_Qtrue.
#define NUM2INT
Old name of RB_NUM2INT.
Definition int.h:44
#define INT2NUM
Old name of RB_INT2NUM.
Definition int.h:43
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
Definition long.h:46
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define POSFIXABLE
Old name of RB_POSFIXABLE.
Definition fixnum.h:29
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define FIXNUM_P
Old name of RB_FIXNUM_P.
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
Definition eval.c:298
#define ruby_debug
This variable controls whether the interpreter is in debug mode.
Definition error.h:470
void rb_raise(VALUE exc, const char *fmt,...)
Exception entry point.
Definition error.c:3150
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition eval.c:688
void rb_bug(const char *fmt,...)
Interpreter panic switch.
Definition error.c:794
VALUE rb_eSystemExit
SystemExit exception.
Definition error.c:1084
VALUE rb_eIOError
IOError exception.
Definition io.c:182
VALUE rb_eStandardError
StandardError exception.
Definition error.c:1088
VALUE rb_eTypeError
TypeError exception.
Definition error.c:1091
void rb_frozen_error_raise(VALUE frozen_obj, const char *fmt,...)
Raises an instance of rb_eFrozenError.
Definition error.c:3472
VALUE rb_eFatal
fatal exception.
Definition error.c:1087
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1089
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports always regardless of runtime -W flag.
Definition error.c:411
VALUE rb_exc_new(VALUE etype, const char *ptr, long len)
Creates an instance of the passed exception class.
Definition error.c:1129
VALUE rb_eArgError
ArgumentError exception.
Definition error.c:1092
VALUE rb_eThreadError
ThreadError exception.
Definition eval.c:886
void rb_exit(int status)
Terminates the current execution context.
Definition process.c:4520
VALUE rb_eSignal
SignalException exception.
Definition error.c:1086
VALUE rb_obj_alloc(VALUE klass)
Allocates an instance of the given class.
Definition object.c:1939
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition object.c:84
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
Definition object.c:190
VALUE rb_cThread
Thread class.
Definition vm.c:466
VALUE rb_cModule
Module class.
Definition object.c:53
double rb_num2dbl(VALUE num)
Converts an instance of rb_cNumeric into C's double.
Definition object.c:3623
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance (of possibly descendants) of the given class.
Definition object.c:787
static const char * rb_enc_name(rb_encoding *enc)
Queries the (canonical) name of the passed encoding.
Definition encoding.h:433
static bool rb_enc_asciicompat(rb_encoding *enc)
Queries if the passed encoding is in some sense compatible with ASCII.
Definition encoding.h:784
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
Definition error.h:280
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
Definition proc.c:848
void rb_reset_random_seed(void)
Resets the RNG behind rb_genrand_int32()/rb_genrand_real().
Definition random.c:1776
VALUE rb_str_new_frozen(VALUE str)
Creates a frozen copy of the string, if necessary.
Definition string.c:1382
VALUE rb_str_concat(VALUE dst, VALUE src)
Identical to rb_str_append(), except it also accepts an integer as a codepoint.
Definition string.c:3453
#define rb_str_cat_cstr(buf, str)
Identical to rb_str_cat(), except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1656
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1514
int rb_thread_interrupted(VALUE thval)
Checks if the thread's execution was recently interrupted.
Definition thread.c:1441
VALUE rb_thread_local_aref(VALUE thread, ID key)
This badly named function reads from a Fiber local storage.
Definition thread.c:3382
VALUE rb_thread_kill(VALUE thread)
Terminates the given thread.
Definition thread.c:2575
#define RUBY_UBF_IO
A special UBF for blocking IO operations.
Definition thread.h:382
VALUE rb_thread_main(void)
Obtains the "main" thread.
Definition thread.c:2814
VALUE rb_exec_recursive(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
"Recursion" API entry point.
void rb_thread_sleep_forever(void)
Blocks indefinitely.
Definition thread.c:1365
void rb_thread_fd_close(int fd)
Notifies a closing of a file descriptor to other threads.
Definition thread.c:2515
void rb_thread_wait_for(struct timeval time)
Identical to rb_thread_sleep(), except it takes struct timeval instead.
Definition thread.c:1409
VALUE rb_thread_stop(void)
Stops the current thread.
Definition thread.c:2726
VALUE rb_exec_recursive_paired(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive(), except it checks for the recursion on the ordered pair of { g,...
void rb_unblock_function_t(void *)
This is the type of UBFs.
Definition thread.h:336
void rb_thread_atfork_before_exec(void)
:FIXME: situation of this function is unclear.
Definition thread.c:4692
void rb_thread_check_ints(void)
Checks for interrupts.
Definition thread.c:1424
VALUE rb_thread_run(VALUE thread)
This is a rb_thread_wakeup() + rb_thread_schedule() combo.
Definition thread.c:2717
VALUE rb_thread_wakeup(VALUE thread)
Marks a given thread as eligible for scheduling.
Definition thread.c:2670
VALUE rb_mutex_unlock(VALUE mutex)
Releases the mutex.
VALUE rb_exec_recursive_paired_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive_outer(), except it checks for the recursion on the ordered pair of { g...
void rb_thread_sleep_deadly(void)
Identical to rb_thread_sleep_forever(), except the thread calling this function is considered "dead" ...
Definition thread.c:1372
void rb_thread_atfork(void)
A pthread_atfork(3posix)-like API.
Definition thread.c:4687
VALUE rb_thread_current(void)
Obtains the "current" thread.
Definition thread.c:2793
int rb_thread_alone(void)
Checks if the thread this function is running is the only thread that is currently alive.
Definition thread.c:3654
VALUE rb_thread_local_aset(VALUE thread, ID key, VALUE val)
This badly named function writes to a Fiber local storage.
Definition thread.c:3530
void rb_thread_schedule(void)
Tries to switch to another thread.
Definition thread.c:1472
#define RUBY_UBF_PROCESS
A special UBF for blocking process operations.
Definition thread.h:389
VALUE rb_exec_recursive_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
Identical to rb_exec_recursive(), except it calls f for outermost recursion only.
VALUE rb_thread_wakeup_alive(VALUE thread)
Identical to rb_thread_wakeup(), except it doesn't raise on an already killed thread.
Definition thread.c:2679
VALUE rb_mutex_lock(VALUE mutex)
Attempts to lock the mutex.
void rb_thread_sleep(int sec)
Blocks for the given period of time.
Definition thread.c:1447
void rb_timespec_now(struct timespec *ts)
Fills the current time into the given struct.
Definition time.c:1931
struct timeval rb_time_timeval(VALUE time)
Converts an instance of rb_cTime to a struct timeval that represents the identical point of time.
Definition time.c:2853
VALUE rb_ivar_set(VALUE obj, ID name, VALUE val)
Identical to rb_iv_set(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1606
VALUE rb_ivar_get(VALUE obj, ID name)
Identical to rb_iv_get(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1218
VALUE rb_class_path(VALUE mod)
Identical to rb_mod_name(), except it returns #<Class: ...> style inspection for anonymous modules.
Definition variable.c:188
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
int rb_sourceline(void)
Resembles __LINE__.
Definition vm.c:1702
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
Definition symbol.h:276
ID rb_check_id(volatile VALUE *namep)
Detects if the given name is already interned or not.
Definition symbol.c:1084
VALUE rb_to_symbol(VALUE name)
Identical to rb_intern_str(), except it generates a dynamic symbol if necessary.
Definition string.c:11918
ID rb_to_id(VALUE str)
Identical to rb_intern(), except it takes an instance of rb_cString.
Definition string.c:11908
void rb_define_const(VALUE klass, const char *name, VALUE val)
Defines a Ruby level constant under a namespace.
Definition variable.c:3440
VALUE rb_eIOTimeoutError
Indicates that a timeout has occurred while performing an IO operation.
Definition io.c:183
#define RB_NOGVL_UBF_ASYNC_SAFE
Passing this flag to rb_nogvl() indicates that the passed UBF is async-signal-safe.
Definition thread.h:60
#define RB_NOGVL_INTR_FAIL
Passing this flag to rb_nogvl() prevents it from checking interrupts.
Definition thread.h:48
void * rb_nogvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int flags)
Identical to rb_thread_call_without_gvl(), except it additionally takes "flags" that change the behav...
Definition thread.c:1528
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
(Re-)acquires the GVL.
Definition thread.c:1761
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Identical to rb_thread_call_without_gvl(), except it does not interface with signals etc.
Definition thread.c:1659
void * rb_thread_call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Allows the passed function to run in parallel with other Ruby threads.
VALUE rb_sprintf(const char *fmt,...)
Ruby's extended sprintf(3).
Definition sprintf.c:1219
VALUE rb_str_catf(VALUE dst, const char *fmt,...)
Identical to rb_sprintf(), except it renders the output to the specified object rather than creating ...
Definition sprintf.c:1242
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Shim for block function parameters.
Definition iterator.h:58
VALUE rb_yield(VALUE val)
Yields the block.
Definition vm_eval.c:1357
void rb_throw_obj(VALUE tag, VALUE val)
Identical to rb_throw(), except it allows arbitrary Ruby object to become a tag.
Definition vm_eval.c:2277
static int rb_fd_max(const rb_fdset_t *f)
It seems this function has no use.
Definition largesize.h:209
void rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
Destructively overwrites an fdset with another.
void rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
Identical to rb_fd_copy(), except it copies unlimited number of file descriptors.
void rb_fd_term(rb_fdset_t *f)
Destroys the rb_fdset_t, releasing any memory and resources it used.
static fd_set * rb_fd_ptr(const rb_fdset_t *f)
Raw pointer to fd_set.
Definition largesize.h:195
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
Definition memory.h:366
#define ALLOCA_N(type, n)
Definition memory.h:286
#define MEMZERO(p, type, n)
Handy macro to erase a region of memory.
Definition memory.h:354
VALUE rb_thread_create(type *q, void *w)
Creates a rb_cThread instance.
void rb_hash_foreach(VALUE q, int_type *w, VALUE e)
Iteration over the given hash.
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of ensure clause.
#define rb_fd_isset
Queries if the given fd is in the rb_fdset_t.
Definition posix.h:60
#define rb_fd_select
Waits for multiple file descriptors at once.
Definition posix.h:66
#define rb_fd_init
Initialises the :given :rb_fdset_t.
Definition posix.h:63
#define rb_fd_set
Sets the given fd to the rb_fdset_t.
Definition posix.h:54
#define rb_fd_zero
Clears the given rb_fdset_t.
Definition posix.h:51
#define rb_fd_clr
Unsets the given fd from the rb_fdset_t.
Definition posix.h:57
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:68
#define RARRAY_CONST_PTR_TRANSIENT
Just another name of rb_array_const_ptr_transient.
Definition rarray.h:70
static int RARRAY_LENINT(VALUE ary)
Identical to rb_array_len(), except it differs for the return type.
Definition rarray.h:343
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
Definition rarray.h:566
#define RARRAY_AREF(a, i)
Definition rarray.h:583
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
Definition rarray.h:69
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
Definition rbasic.h:152
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
#define RCLASS_SUPER
Just another name of rb_class_get_superclass.
Definition rclass.h:44
#define DATA_PTR(obj)
Convenient getter macro.
Definition rdata.h:71
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
Definition rhash.h:92
#define StringValueCStr(v)
Identical to StringValuePtr, except it additionally checks for the contents for viability as a C stri...
Definition rstring.h:95
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
Definition rtypeddata.h:79
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
Definition rtypeddata.h:507
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
Definition rtypeddata.h:441
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of takin...
Definition rtypeddata.h:489
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a ruby's thread.
Definition thread.c:5427
int ruby_snprintf(char *str, size_t n, char const *fmt,...)
Our own locale-insensitive version of snprintf(3).
Definition sprintf.c:1045
#define RB_PASS_CALLED_KEYWORDS
Pass keywords if current method is called with keywords, useful for argument delegation.
Definition scan_args.h:78
Scheduler APIs.
VALUE rb_fiber_scheduler_current(void)
Identical to rb_fiber_scheduler_get(), except it also returns RUBY_Qnil in case of a blocking fiber.
Definition scheduler.c:203
VALUE rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
Non-blocking wait for the passed "blocker", which is for instance Thread.join or Mutex....
Definition scheduler.c:367
VALUE rb_fiber_scheduler_set(VALUE scheduler)
Destructively assigns the passed scheduler to that of the current thread that is calling this functio...
Definition scheduler.c:165
VALUE rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
Wakes up a fiber previously blocked using rb_fiber_scheduler_block().
Definition scheduler.c:386
int rb_thread_fd_select(int nfds, rb_fdset_t *rfds, rb_fdset_t *wfds, rb_fdset_t *efds, struct timeval *timeout)
Waits for multiple file descriptors at once.
Definition thread.c:4186
#define rb_fd_resize(n, f)
Does nothing (defined for compatibility).
Definition select.h:43
#define RTEST
This is an old name of RB_TEST.
#define _(args)
This was a transition path from K&R to ANSI.
Definition stdarg.h:35
Definition method.h:62
This is the struct that holds necessary info for a struct.
Definition rtypeddata.h:190
The data structure which wraps the fd_set bitmap used by select(2).
Definition largesize.h:71
int maxfd
Maximum allowed number of FDs.
Definition largesize.h:72
fd_set * fdset
File descriptors buffer.
Definition largesize.h:73
int capa
Maximum allowed number of FDs.
Definition win32.h:50
Definition method.h:54
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition method.h:135
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Blocks until the current thread obtains a lock.
Definition thread.c:299
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Releases a lock.
Definition thread.c:305
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_destroy.
void rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
Fills the passed lock with an initial value.
Definition thread.c:287
void rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
Destroys the passed mutex.
Definition thread.c:293
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:375