Ruby 3.2.3p157 (2024-01-18 revision 52bb2ac0a6971d0391efa2275f7a66bff319087c)
vm_trace.c
1/**********************************************************************
2
3 vm_trace.c -
4
5 $Author: ko1 $
6 created at: Tue Aug 14 19:37:09 2012
7
8 Copyright (C) 1993-2012 Yukihiro Matsumoto
9
10**********************************************************************/
11
12/*
13 * This file include two parts:
14 *
15 * (1) set_trace_func internal mechanisms
16 * and C level API
17 *
18 * (2) Ruby level API
19 * (2-1) set_trace_func API
20 * (2-2) TracePoint API (not yet)
21 *
22 */
23
24#include "eval_intern.h"
25#include "internal.h"
26#include "internal/hash.h"
27#include "internal/symbol.h"
28#include "iseq.h"
29#include "mjit.h"
30#include "ruby/debug.h"
31#include "vm_core.h"
32#include "ruby/ractor.h"
33#include "yjit.h"
34
35#include "builtin.h"
36
37static VALUE sym_default;
38
39/* (1) trace mechanisms */
40
41typedef struct rb_event_hook_struct {
42 rb_event_hook_flag_t hook_flags;
43 rb_event_flag_t events;
45 VALUE data;
46 struct rb_event_hook_struct *next;
47
48 struct {
49 rb_thread_t *th;
50 unsigned int target_line;
51 } filter;
53
54typedef void (*rb_event_hook_raw_arg_func_t)(VALUE data, const rb_trace_arg_t *arg);
55
56#define MAX_EVENT_NUM 32
57
58void
59rb_hook_list_mark(rb_hook_list_t *hooks)
60{
61 rb_event_hook_t *hook = hooks->hooks;
62
63 while (hook) {
64 rb_gc_mark(hook->data);
65 hook = hook->next;
66 }
67}
68
69static void clean_hooks(const rb_execution_context_t *ec, rb_hook_list_t *list);
70
71void
72rb_hook_list_free(rb_hook_list_t *hooks)
73{
74 hooks->need_clean = true;
75
76 if (hooks->running == 0) {
77 clean_hooks(GET_EC(), hooks);
78 }
79}
80
81/* ruby_vm_event_flags management */
82
83void rb_clear_attr_ccs(void);
84
/*
 * Recompute process-global event bookkeeping after the global hook list
 * changed from `prev_events` to `new_events`.  In order:
 *   - rewrite all ISeqs with trace_* instructions the first time a new
 *     ISEQ_TRACE_EVENTS bit is requested;
 *   - flush attr call caches when c_call/c_return tracing is newly
 *     enabled (cached attr dispatch would otherwise skip those events);
 *   - publish the new flag sets and notify the objspace layer;
 *   - invalidate JIT-compiled code last, so other ractors observe the
 *     updated flags when they wake up.
 */
static void
update_global_event_hook(rb_event_flag_t prev_events, rb_event_flag_t new_events)
{
    rb_event_flag_t new_iseq_events = new_events & ISEQ_TRACE_EVENTS;
    rb_event_flag_t enabled_iseq_events = ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS;
    bool first_time_iseq_events_p = new_iseq_events & ~enabled_iseq_events;
    bool enable_c_call = (prev_events & RUBY_EVENT_C_CALL) == 0 && (new_events & RUBY_EVENT_C_CALL);
    bool enable_c_return = (prev_events & RUBY_EVENT_C_RETURN) == 0 && (new_events & RUBY_EVENT_C_RETURN);

    // Modify ISEQs or CCs to enable tracing
    if (first_time_iseq_events_p) {
        // write all ISeqs only when new events are added for the first time
        rb_iseq_trace_set_all(new_iseq_events | enabled_iseq_events);
    }
    // if c_call or c_return is activated
    else if (enable_c_call || enable_c_return) {
        rb_clear_attr_ccs();
    }

    ruby_vm_event_flags = new_events;
    ruby_vm_event_enabled_global_flags |= new_events;
    rb_objspace_set_event_hook(new_events);

    // Invalidate JIT code as needed
    if (first_time_iseq_events_p || enable_c_call || enable_c_return) {
        // Invalidate all code when ISEQs are modified to use trace_* insns above.
        // Also invalidate when enabling c_call or c_return because generated code
        // never fires these events.
        // Internal events fire inside C routines so don't need special handling.
        // Do this after event flags updates so other ractors see updated vm events
        // when they wake up.
        rb_yjit_tracing_invalidate_all();
        rb_mjit_tracing_invalidate_all(new_iseq_events);
    }
}
120
121/* add/remove hooks */
122
123static rb_event_hook_t *
124alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
125{
126 rb_event_hook_t *hook;
127
128 if ((events & RUBY_INTERNAL_EVENT_MASK) && (events & ~RUBY_INTERNAL_EVENT_MASK)) {
129 rb_raise(rb_eTypeError, "Can not specify normal event and internal event simultaneously.");
130 }
131
132 hook = ALLOC(rb_event_hook_t);
133 hook->hook_flags = hook_flags;
134 hook->events = events;
135 hook->func = func;
136 hook->data = data;
137
138 /* no filters */
139 hook->filter.th = NULL;
140 hook->filter.target_line = 0;
141
142 return hook;
143}
144
/*
 * Prepend `hook` to `list` and merge its event bits into the list's
 * aggregate mask.  `global_p` distinguishes the ractor-global list
 * (whose hooks are GC roots, so global event flags must be recomputed)
 * from object-local lists, where a write barrier records that
 * `list_owner` now references hook->data.
 */
static void
hook_list_connect(VALUE list_owner, rb_hook_list_t *list, rb_event_hook_t *hook, int global_p)
{
    rb_event_flag_t prev_events = list->events;
    hook->next = list->hooks;
    list->hooks = hook;
    list->events |= hook->events;

    if (global_p) {
        /* global hooks are root objects at GC mark. */
        update_global_event_hook(prev_events, list->events);
    }
    else {
        RB_OBJ_WRITTEN(list_owner, Qundef, hook->data);
    }
}
161
162static void
163connect_event_hook(const rb_execution_context_t *ec, rb_event_hook_t *hook)
164{
165 rb_hook_list_t *list = rb_ec_ractor_hooks(ec);
166 hook_list_connect(Qundef, list, hook, TRUE);
167}
168
169static void
170rb_threadptr_add_event_hook(const rb_execution_context_t *ec, rb_thread_t *th,
171 rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
172{
173 rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
174 hook->filter.th = th;
175 connect_event_hook(ec, hook);
176}
177
178void
180{
181 rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
182}
183
184void
186{
187 rb_add_event_hook2(func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
188}
189
/* Public C API: add an event hook filtered to thread `thval`, with
 * caller-supplied hook flags (SAFE / RAW_ARG). */
void
rb_thread_add_event_hook2(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
{
    rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, hook_flags);
}
195
196void
197rb_add_event_hook2(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
198{
199 rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
200 connect_event_hook(GET_EC(), hook);
201}
202
/*
 * Physically unlink and free every hook flagged DELETED, rebuilding the
 * list's aggregate event mask from the survivors.  Must only run while
 * no iteration is in progress (running == 0).  A local (per-object)
 * list frees itself once empty; the global list instead refreshes the
 * process-wide event flags.
 */
static void
clean_hooks(const rb_execution_context_t *ec, rb_hook_list_t *list)
{
    /* nextp always points at the link field to overwrite on deletion */
    rb_event_hook_t *hook, **nextp = &list->hooks;
    rb_event_flag_t prev_events = list->events;

    VM_ASSERT(list->running == 0);
    VM_ASSERT(list->need_clean == true);

    list->events = 0;
    list->need_clean = false;

    while ((hook = *nextp) != 0) {
        if (hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) {
            *nextp = hook->next;
            xfree(hook);
        }
        else {
            list->events |= hook->events; /* update active events */
            nextp = &hook->next;
        }
    }

    if (list->is_local) {
        if (list->events == 0) {
            /* local events */
            ruby_xfree(list);
        }
    }
    else {
        update_global_event_hook(prev_events, list->events);
    }
}
236
237static void
238clean_hooks_check(const rb_execution_context_t *ec, rb_hook_list_t *list)
239{
240 if (UNLIKELY(list->need_clean)) {
241 if (list->running == 0) {
242 clean_hooks(ec, list);
243 }
244 }
245}
246
247#define MATCH_ANY_FILTER_TH ((rb_thread_t *)1)
248
249/* if func is 0, then clear all funcs */
250static int
251remove_event_hook(const rb_execution_context_t *ec, const rb_thread_t *filter_th, rb_event_hook_func_t func, VALUE data)
252{
253 rb_hook_list_t *list = rb_ec_ractor_hooks(ec);
254 int ret = 0;
255 rb_event_hook_t *hook = list->hooks;
256
257 while (hook) {
258 if (func == 0 || hook->func == func) {
259 if (hook->filter.th == filter_th || filter_th == MATCH_ANY_FILTER_TH) {
260 if (UNDEF_P(data) || hook->data == data) {
261 hook->hook_flags |= RUBY_EVENT_HOOK_FLAG_DELETED;
262 ret+=1;
263 list->need_clean = true;
264 }
265 }
266 }
267 hook = hook->next;
268 }
269
270 clean_hooks_check(ec, list);
271 return ret;
272}
273
/* Remove hooks registered for `filter_th`; thin wrapper over
 * remove_event_hook() kept for call-site symmetry with the add path. */
static int
rb_threadptr_remove_event_hook(const rb_execution_context_t *ec, const rb_thread_t *filter_th, rb_event_hook_func_t func, VALUE data)
{
    return remove_event_hook(ec, filter_th, func, data);
}
279
280int
282{
283 return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, Qundef);
284}
285
286int
288{
289 return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, data);
290}
291
292int
294{
295 return remove_event_hook(GET_EC(), NULL, func, Qundef);
296}
297
298int
300{
301 return remove_event_hook(GET_EC(), NULL, func, data);
302}
303
/* Remove every hook filtered to the thread of `ec` (func == 0 matches
 * all functions, Qundef matches any data). */
void
rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec)
{
    rb_threadptr_remove_event_hook(ec, rb_ec_thread_ptr(ec), 0, Qundef);
}
309
/* Remove every hook on the ractor-global list regardless of its thread
 * filter (MATCH_ANY_FILTER_TH). */
void
rb_ec_clear_all_trace_func(const rb_execution_context_t *ec)
{
    rb_threadptr_remove_event_hook(ec, MATCH_ANY_FILTER_TH, 0, Qundef);
}
315
316/* invoke hooks */
317
/*
 * Invoke every live hook on `list` that matches the current event and
 * passes its thread and line filters.  RAW_ARG hooks receive the
 * trace_arg struct itself; plain hooks get the unpacked
 * (event, data, self, id, klass) tuple.
 */
static void
exec_hooks_body(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
{
    rb_event_hook_t *hook;

    for (hook = list->hooks; hook; hook = hook->next) {
        /* skip deleted hooks; then require event-bit match, thread
         * filter match (0 = any thread), and line filter match
         * (0 = any line) */
        if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) &&
            (trace_arg->event & hook->events) &&
            (LIKELY(hook->filter.th == 0) || hook->filter.th == rb_ec_thread_ptr(ec)) &&
            (LIKELY(hook->filter.target_line == 0) || (hook->filter.target_line == (unsigned int)rb_vm_get_sourceline(ec->cfp)))) {
            if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_RAW_ARG)) {
                (*hook->func)(trace_arg->event, hook->data, trace_arg->self, trace_arg->id, trace_arg->klass);
            }
            else {
                (*((rb_event_hook_raw_arg_func_t)hook->func))(hook->data, trace_arg);
            }
        }
    }
}
337
338static int
339exec_hooks_precheck(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
340{
341 if (list->events & trace_arg->event) {
342 list->running++;
343 return TRUE;
344 }
345 else {
346 return FALSE;
347 }
348}
349
/* Balance exec_hooks_precheck(): drop the running counter and perform
 * any hook cleanup that was deferred while the hooks were executing. */
static void
exec_hooks_postcheck(const rb_execution_context_t *ec, rb_hook_list_t *list)
{
    list->running--;
    clean_hooks_check(ec, list);
}
356
357static void
358exec_hooks_unprotected(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
359{
360 if (exec_hooks_precheck(ec, list, trace_arg) == 0) return;
361 exec_hooks_body(ec, list, trace_arg);
362 exec_hooks_postcheck(ec, list);
363}
364
/*
 * Run hooks under an EC tag so a raising hook cannot unwind through the
 * VM directly: any tag state is captured and returned to the caller,
 * which decides whether to re-throw.  The EC's "raised" flag is saved
 * before and restored after the hook invocation.
 */
static int
exec_hooks_protected(rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
{
    enum ruby_tag_type state;
    volatile int raised;

    if (exec_hooks_precheck(ec, list, trace_arg) == 0) return 0;

    raised = rb_ec_reset_raised(ec);

    /* TODO: Support !RUBY_EVENT_HOOK_FLAG_SAFE hooks */

    EC_PUSH_TAG(ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        exec_hooks_body(ec, list, trace_arg);
    }
    EC_POP_TAG();

    exec_hooks_postcheck(ec, list);

    if (raised) {
        /* restore the raised flag cleared above */
        rb_ec_set_raised(ec);
    }

    return state;
}
391
392// pop_p: Whether to pop the frame for the TracePoint when it throws.
// pop_p: Whether to pop the frame for the TracePoint when it throws.
/*
 * Central event dispatch.  Internal events (NEWOBJ/FREEOBJ/...) run
 * only the global hooks, unprotected, and are skipped while another
 * internal event is already being traced on this EC.  Normal events are
 * guarded against re-entrancy via ec->trace_arg, swap in a dedicated
 * recursion hash and a clean errinfo for the duration of the hooks, and
 * re-throw any tag a hook raised -- optionally popping the current
 * frame first (pop_p).
 */
MJIT_FUNC_EXPORTED void
rb_exec_event_hooks(rb_trace_arg_t *trace_arg, rb_hook_list_t *hooks, int pop_p)
{
    rb_execution_context_t *ec = trace_arg->ec;

    if (UNLIKELY(trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
        if (ec->trace_arg && (ec->trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
            /* skip hooks because this thread doing INTERNAL_EVENT */
        }
        else {
            rb_trace_arg_t *prev_trace_arg = ec->trace_arg;

            ec->trace_arg = trace_arg;
            /* only global hooks */
            exec_hooks_unprotected(ec, rb_ec_ractor_hooks(ec), trace_arg);
            ec->trace_arg = prev_trace_arg;
        }
    }
    else {
        if (ec->trace_arg == NULL && /* check reentrant */
            trace_arg->self != rb_mRubyVMFrozenCore /* skip special methods. TODO: remove it. */) {
            const VALUE errinfo = ec->errinfo;
            const VALUE old_recursive = ec->local_storage_recursive_hash;
            int state = 0;

            /* setup */
            ec->local_storage_recursive_hash = ec->local_storage_recursive_hash_for_trace;
            ec->errinfo = Qnil;
            ec->trace_arg = trace_arg;

            /* kick hooks */
            if ((state = exec_hooks_protected(ec, hooks, trace_arg)) == TAG_NONE) {
                /* hooks completed normally: restore the caller's errinfo */
                ec->errinfo = errinfo;
            }

            /* cleanup */
            ec->trace_arg = NULL;
            ec->local_storage_recursive_hash_for_trace = ec->local_storage_recursive_hash;
            ec->local_storage_recursive_hash = old_recursive;

            if (state) {
                if (pop_p) {
                    if (VM_FRAME_FINISHED_P(ec->cfp)) {
                        /* the frame's tag would be left dangling; unlink it */
                        ec->tag = ec->tag->prev;
                    }
                    rb_vm_pop_frame(ec);
                }
                EC_JUMP_TAG(ec, state);
            }
        }
    }
}
445
446VALUE
447rb_suppress_tracing(VALUE (*func)(VALUE), VALUE arg)
448{
449 volatile int raised;
450 volatile VALUE result = Qnil;
451 rb_execution_context_t *const ec = GET_EC();
452 rb_vm_t *const vm = rb_ec_vm_ptr(ec);
453 enum ruby_tag_type state;
454 rb_trace_arg_t dummy_trace_arg;
455 dummy_trace_arg.event = 0;
456
457 if (!ec->trace_arg) {
458 ec->trace_arg = &dummy_trace_arg;
459 }
460
461 raised = rb_ec_reset_raised(ec);
462
463 EC_PUSH_TAG(ec);
464 if (LIKELY((state = EC_EXEC_TAG()) == TAG_NONE)) {
465 result = (*func)(arg);
466 }
467 else {
468 (void)*&vm; /* suppress "clobbered" warning */
469 }
470 EC_POP_TAG();
471
472 if (raised) {
473 rb_ec_reset_raised(ec);
474 }
475
476 if (ec->trace_arg == &dummy_trace_arg) {
477 ec->trace_arg = NULL;
478 }
479
480 if (state) {
481#if defined RUBY_USE_SETJMPEX && RUBY_USE_SETJMPEX
482 RB_GC_GUARD(result);
483#endif
484 EC_JUMP_TAG(ec, state);
485 }
486
487 return result;
488}
489
490static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
491
492/* (2-1) set_trace_func (old API) */
493
494/*
495 * call-seq:
496 * set_trace_func(proc) -> proc
497 * set_trace_func(nil) -> nil
498 *
499 * Establishes _proc_ as the handler for tracing, or disables
500 * tracing if the parameter is +nil+.
501 *
502 * *Note:* this method is obsolete, please use TracePoint instead.
503 *
504 * _proc_ takes up to six parameters:
505 *
506 * * an event name
507 * * a filename
508 * * a line number
509 * * an object id
510 * * a binding
511 * * the name of a class
512 *
513 * _proc_ is invoked whenever an event occurs.
514 *
515 * Events are:
516 *
517 * +c-call+:: call a C-language routine
518 * +c-return+:: return from a C-language routine
519 * +call+:: call a Ruby method
520 * +class+:: start a class or module definition
521 * +end+:: finish a class or module definition
522 * +line+:: execute code on a new line
523 * +raise+:: raise an exception
524 * +return+:: return from a Ruby method
525 *
526 * Tracing is disabled within the context of _proc_.
527 *
528 * class Test
529 * def test
530 * a = 1
531 * b = 2
532 * end
533 * end
534 *
535 * set_trace_func proc { |event, file, line, id, binding, classname|
536 * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
537 * }
538 * t = Test.new
539 * t.test
540 *
541 * line prog.rb:11 false
542 * c-call prog.rb:11 new Class
543 * c-call prog.rb:11 initialize Object
544 * c-return prog.rb:11 initialize Object
545 * c-return prog.rb:11 new Class
546 * line prog.rb:12 false
547 * call prog.rb:2 test Test
548 * line prog.rb:3 test Test
549 * line prog.rb:4 test Test
550 * return prog.rb:4 test Test
551 *
552 * Note that for +c-call+ and +c-return+ events, the binding returned is the
553 * binding of the nearest Ruby method calling the C method, since C methods
554 * themselves do not have bindings.
555 */
556
557static VALUE
558set_trace_func(VALUE obj, VALUE trace)
559{
560 rb_remove_event_hook(call_trace_func);
561
562 if (NIL_P(trace)) {
563 return Qnil;
564 }
565
566 if (!rb_obj_is_proc(trace)) {
567 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
568 }
569
570 rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
571 return trace;
572}
573
/* Validate that `trace` is a Proc and install it as a per-thread trace
 * handler firing for all events on `filter_th`. */
static void
thread_add_trace_func(rb_execution_context_t *ec, rb_thread_t *filter_th, VALUE trace)
{
    if (!rb_obj_is_proc(trace)) {
        rb_raise(rb_eTypeError, "trace_func needs to be Proc");
    }

    rb_threadptr_add_event_hook(ec, filter_th, call_trace_func, RUBY_EVENT_ALL, trace, RUBY_EVENT_HOOK_FLAG_SAFE);
}
583
584/*
585 * call-seq:
586 * thr.add_trace_func(proc) -> proc
587 *
588 * Adds _proc_ as a handler for tracing.
589 *
590 * See Thread#set_trace_func and Kernel#set_trace_func.
591 */
592
static VALUE
thread_add_trace_func_m(VALUE obj, VALUE trace)
{
    /* Thread#add_trace_func: install an additional handler on `obj` */
    thread_add_trace_func(GET_EC(), rb_thread_ptr(obj), trace);
    return trace;
}
599
600/*
601 * call-seq:
602 * thr.set_trace_func(proc) -> proc
603 * thr.set_trace_func(nil) -> nil
604 *
605 * Establishes _proc_ on _thr_ as the handler for tracing, or
606 * disables tracing if the parameter is +nil+.
607 *
608 * See Kernel#set_trace_func.
609 */
610
611static VALUE
612thread_set_trace_func_m(VALUE target_thread, VALUE trace)
613{
614 rb_execution_context_t *ec = GET_EC();
615 rb_thread_t *target_th = rb_thread_ptr(target_thread);
616
617 rb_threadptr_remove_event_hook(ec, target_th, call_trace_func, Qundef);
618
619 if (NIL_P(trace)) {
620 return Qnil;
621 }
622 else {
623 thread_add_trace_func(ec, target_th, trace);
624 return trace;
625 }
626}
627
628static const char *
629get_event_name(rb_event_flag_t event)
630{
631 switch (event) {
632 case RUBY_EVENT_LINE: return "line";
633 case RUBY_EVENT_CLASS: return "class";
634 case RUBY_EVENT_END: return "end";
635 case RUBY_EVENT_CALL: return "call";
636 case RUBY_EVENT_RETURN: return "return";
637 case RUBY_EVENT_C_CALL: return "c-call";
638 case RUBY_EVENT_C_RETURN: return "c-return";
639 case RUBY_EVENT_RAISE: return "raise";
640 default:
641 return "unknown";
642 }
643}
644
/* Map a single event flag to its interned ID (for Symbol conversion);
 * returns 0 for unrecognized flags. */
static ID
get_event_id(rb_event_flag_t event)
{
    ID id;

    switch (event) {
#define C(name, NAME) case RUBY_EVENT_##NAME: CONST_ID(id, #name); return id;
        C(line, LINE);
        C(class, CLASS);
        C(end, END);
        C(call, CALL);
        C(return, RETURN);
        C(c_call, C_CALL);
        C(c_return, C_RETURN);
        C(raise, RAISE);
        C(b_call, B_CALL);
        C(b_return, B_RETURN);
        C(thread_begin, THREAD_BEGIN);
        C(thread_end, THREAD_END);
        C(fiber_switch, FIBER_SWITCH);
        C(script_compiled, SCRIPT_COMPILED);
#undef C
      default:
        return 0;
    }
}
671
672static void
673get_path_and_lineno(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, rb_event_flag_t event, VALUE *pathp, int *linep)
674{
675 cfp = rb_vm_get_ruby_level_next_cfp(ec, cfp);
676
677 if (cfp) {
678 const rb_iseq_t *iseq = cfp->iseq;
679 *pathp = rb_iseq_path(iseq);
680
681 if (event & (RUBY_EVENT_CLASS |
684 *linep = FIX2INT(rb_iseq_first_lineno(iseq));
685 }
686 else {
687 *linep = rb_vm_get_sourceline(cfp);
688 }
689 }
690 else {
691 *pathp = Qnil;
692 *linep = 0;
693 }
694}
695
/*
 * Event hook used by set_trace_func: builds the six classic handler
 * arguments (event name, file, line, method id, binding, class) and
 * yields them to the user's Proc.
 */
static void
call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
{
    int line;
    VALUE filename;
    VALUE eventname = rb_str_new2(get_event_name(event));
    VALUE argv[6];
    const rb_execution_context_t *ec = GET_EC();

    get_path_and_lineno(ec, ec->cfp, event, &filename, &line);

    if (!klass) {
        rb_ec_frame_method_id_and_class(ec, &id, 0, &klass);
    }

    if (klass) {
        if (RB_TYPE_P(klass, T_ICLASS)) {
            klass = RBASIC(klass)->klass;
        }
        else if (FL_TEST(klass, FL_SINGLETON)) {
            /* report the attached object, not the singleton class itself */
            klass = rb_ivar_get(klass, id__attached__);
        }
    }

    argv[0] = eventname;
    argv[1] = filename;
    argv[2] = INT2FIX(line);
    argv[3] = id ? ID2SYM(id) : Qnil;
    argv[4] = Qnil;
    /* only Ruby frames can supply a binding; C calls/returns cannot */
    if (self && (filename != Qnil) &&
        event != RUBY_EVENT_C_CALL &&
        event != RUBY_EVENT_C_RETURN &&
        (VM_FRAME_RUBYFRAME_P(ec->cfp) && imemo_type_p((VALUE)ec->cfp->iseq, imemo_iseq))) {
        argv[4] = rb_binding_new();
    }
    argv[5] = klass ? klass : Qnil;

    rb_proc_call_with_block(proc, 6, argv, Qnil);
}
735
736/* (2-2) TracePoint API */
737
738static VALUE rb_cTracePoint;
739
/* Internal state of a TracePoint object (wrapped via tp_data_type). */
typedef struct rb_tp_struct {
    rb_event_flag_t events;  /* events this tracepoint subscribes to */
    int tracing; /* bool */  /* nonzero while enabled */
    rb_thread_t *target_th;  /* non-NULL: only fire for this thread */
    VALUE local_target_set; /* Hash: target ->
                             * Qtrue (if target is iseq) or
                             * Qfalse (if target is bmethod)
                             */
    void (*func)(VALUE tpval, void *data);  /* C-level callback, or NULL */
    void *data;                             /* opaque argument for func */
    VALUE proc;              /* Ruby-level handler (used when func is NULL) */
    rb_ractor_t *ractor;     /* creating ractor; NULL = callable from any */
    VALUE self;              /* the TracePoint VALUE wrapping this struct */
} rb_tp_t;
754
755static void
756tp_mark(void *ptr)
757{
758 rb_tp_t *tp = ptr;
759 rb_gc_mark(tp->proc);
760 rb_gc_mark(tp->local_target_set);
761 if (tp->target_th) rb_gc_mark(tp->target_th->self);
762}
763
/* Memory-size callback: rb_tp_t carries no out-of-line allocations. */
static size_t
tp_memsize(const void *ptr)
{
    return sizeof(rb_tp_t);
}
769
/* TypedData binding for TracePoint objects (default free, immediate). */
static const rb_data_type_t tp_data_type = {
    "tracepoint",
    {tp_mark, RUBY_TYPED_DEFAULT_FREE, tp_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
775
/* Allocator: zero-initialized rb_tp_t wrapped as TypedData of `klass`. */
static VALUE
tp_alloc(VALUE klass)
{
    rb_tp_t *tp;
    return TypedData_Make_Struct(klass, rb_tp_t, &tp_data_type, tp);
}
782
783static rb_event_flag_t
784symbol2event_flag(VALUE v)
785{
786 ID id;
787 VALUE sym = rb_to_symbol_type(v);
788 const rb_event_flag_t RUBY_EVENT_A_CALL =
790 const rb_event_flag_t RUBY_EVENT_A_RETURN =
792
793#define C(name, NAME) CONST_ID(id, #name); if (sym == ID2SYM(id)) return RUBY_EVENT_##NAME
794 C(line, LINE);
795 C(class, CLASS);
796 C(end, END);
797 C(call, CALL);
798 C(return, RETURN);
799 C(c_call, C_CALL);
800 C(c_return, C_RETURN);
801 C(raise, RAISE);
802 C(b_call, B_CALL);
803 C(b_return, B_RETURN);
804 C(thread_begin, THREAD_BEGIN);
805 C(thread_end, THREAD_END);
806 C(fiber_switch, FIBER_SWITCH);
807 C(script_compiled, SCRIPT_COMPILED);
808
809 /* joke */
810 C(a_call, A_CALL);
811 C(a_return, A_RETURN);
812#undef C
813 rb_raise(rb_eArgError, "unknown event: %"PRIsVALUE, rb_sym2str(sym));
814}
815
816static rb_tp_t *
817tpptr(VALUE tpval)
818{
819 rb_tp_t *tp;
820 TypedData_Get_Struct(tpval, rb_tp_t, &tp_data_type, tp);
821 return tp;
822}
823
824static rb_trace_arg_t *
825get_trace_arg(void)
826{
827 rb_trace_arg_t *trace_arg = GET_EC()->trace_arg;
828 if (trace_arg == 0) {
829 rb_raise(rb_eRuntimeError, "access from outside");
830 }
831 return trace_arg;
832}
833
834struct rb_trace_arg_struct *
836{
837 return get_trace_arg();
838}
839
842{
843 return trace_arg->event;
844}
845
846VALUE
848{
849 return ID2SYM(get_event_id(trace_arg->event));
850}
851
852static void
853fill_path_and_lineno(rb_trace_arg_t *trace_arg)
854{
855 if (UNDEF_P(trace_arg->path)) {
856 get_path_and_lineno(trace_arg->ec, trace_arg->cfp, trace_arg->event, &trace_arg->path, &trace_arg->lineno);
857 }
858}
859
860VALUE
862{
863 fill_path_and_lineno(trace_arg);
864 return INT2FIX(trace_arg->lineno);
865}
866VALUE
868{
869 fill_path_and_lineno(trace_arg);
870 return trace_arg->path;
871}
872
/*
 * Lazily resolve method id / called_id / defined class for the trace
 * arg (memoized via klass_solved).  An ICLASS is normalized to the
 * module it represents; an unresolvable klass becomes nil.
 */
static void
fill_id_and_klass(rb_trace_arg_t *trace_arg)
{
    if (!trace_arg->klass_solved) {
        if (!trace_arg->klass) {
            rb_vm_control_frame_id_and_class(trace_arg->cfp, &trace_arg->id, &trace_arg->called_id, &trace_arg->klass);
        }

        if (trace_arg->klass) {
            if (RB_TYPE_P(trace_arg->klass, T_ICLASS)) {
                trace_arg->klass = RBASIC(trace_arg->klass)->klass;
            }
        }
        else {
            trace_arg->klass = Qnil;
        }

        trace_arg->klass_solved = 1;
    }
}
893
894VALUE
895rb_tracearg_parameters(rb_trace_arg_t *trace_arg)
896{
897 switch (trace_arg->event) {
898 case RUBY_EVENT_CALL:
901 case RUBY_EVENT_B_RETURN: {
902 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(trace_arg->ec, trace_arg->cfp);
903 if (cfp) {
904 int is_proc = 0;
905 if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_BLOCK && !VM_FRAME_LAMBDA_P(cfp)) {
906 is_proc = 1;
907 }
908 return rb_iseq_parameters(cfp->iseq, is_proc);
909 }
910 break;
911 }
913 case RUBY_EVENT_C_RETURN: {
914 fill_id_and_klass(trace_arg);
915 if (trace_arg->klass && trace_arg->id) {
916 const rb_method_entry_t *me;
917 VALUE iclass = Qnil;
918 me = rb_method_entry_without_refinements(trace_arg->klass, trace_arg->called_id, &iclass);
919 return rb_unnamed_parameters(rb_method_entry_arity(me));
920 }
921 break;
922 }
923 case RUBY_EVENT_RAISE:
924 case RUBY_EVENT_LINE:
925 case RUBY_EVENT_CLASS:
926 case RUBY_EVENT_END:
928 rb_raise(rb_eRuntimeError, "not supported by this event");
929 break;
930 }
931 return Qnil;
932}
933
934VALUE
936{
937 fill_id_and_klass(trace_arg);
938 return trace_arg->id ? ID2SYM(trace_arg->id) : Qnil;
939}
940
941VALUE
943{
944 fill_id_and_klass(trace_arg);
945 return trace_arg->called_id ? ID2SYM(trace_arg->called_id) : Qnil;
946}
947
948VALUE
950{
951 fill_id_and_klass(trace_arg);
952 return trace_arg->klass;
953}
954
955VALUE
957{
959 switch (trace_arg->event) {
962 return Qnil;
963 }
964 cfp = rb_vm_get_binding_creatable_next_cfp(trace_arg->ec, trace_arg->cfp);
965
966 if (cfp && imemo_type_p((VALUE)cfp->iseq, imemo_iseq)) {
967 return rb_vm_make_binding(trace_arg->ec, cfp);
968 }
969 else {
970 return Qnil;
971 }
972}
973
974VALUE
976{
977 return trace_arg->self;
978}
979
980VALUE
982{
983 if (trace_arg->event & (RUBY_EVENT_RETURN | RUBY_EVENT_C_RETURN | RUBY_EVENT_B_RETURN)) {
984 /* ok */
985 }
986 else {
987 rb_raise(rb_eRuntimeError, "not supported by this event");
988 }
989 if (UNDEF_P(trace_arg->data)) {
990 rb_bug("rb_tracearg_return_value: unreachable");
991 }
992 return trace_arg->data;
993}
994
995VALUE
997{
998 if (trace_arg->event & (RUBY_EVENT_RAISE)) {
999 /* ok */
1000 }
1001 else {
1002 rb_raise(rb_eRuntimeError, "not supported by this event");
1003 }
1004 if (UNDEF_P(trace_arg->data)) {
1005 rb_bug("rb_tracearg_raised_exception: unreachable");
1006 }
1007 return trace_arg->data;
1008}
1009
1010VALUE
1011rb_tracearg_eval_script(rb_trace_arg_t *trace_arg)
1012{
1013 VALUE data = trace_arg->data;
1014
1015 if (trace_arg->event & (RUBY_EVENT_SCRIPT_COMPILED)) {
1016 /* ok */
1017 }
1018 else {
1019 rb_raise(rb_eRuntimeError, "not supported by this event");
1020 }
1021 if (UNDEF_P(data)) {
1022 rb_bug("rb_tracearg_raised_exception: unreachable");
1023 }
1024 if (rb_obj_is_iseq(data)) {
1025 return Qnil;
1026 }
1027 else {
1028 VM_ASSERT(RB_TYPE_P(data, T_ARRAY));
1029 /* [src, iseq] */
1030 return RARRAY_AREF(data, 0);
1031 }
1032}
1033
1034VALUE
1035rb_tracearg_instruction_sequence(rb_trace_arg_t *trace_arg)
1036{
1037 VALUE data = trace_arg->data;
1038
1039 if (trace_arg->event & (RUBY_EVENT_SCRIPT_COMPILED)) {
1040 /* ok */
1041 }
1042 else {
1043 rb_raise(rb_eRuntimeError, "not supported by this event");
1044 }
1045 if (UNDEF_P(data)) {
1046 rb_bug("rb_tracearg_raised_exception: unreachable");
1047 }
1048
1049 if (rb_obj_is_iseq(data)) {
1050 return rb_iseqw_new((const rb_iseq_t *)data);
1051 }
1052 else {
1053 VM_ASSERT(RB_TYPE_P(data, T_ARRAY));
1054 VM_ASSERT(rb_obj_is_iseq(RARRAY_AREF(data, 1)));
1055
1056 /* [src, iseq] */
1057 return rb_iseqw_new((const rb_iseq_t *)RARRAY_AREF(data, 1));
1058 }
1059}
1060
1061VALUE
1063{
1064 if (trace_arg->event & (RUBY_INTERNAL_EVENT_NEWOBJ | RUBY_INTERNAL_EVENT_FREEOBJ)) {
1065 /* ok */
1066 }
1067 else {
1068 rb_raise(rb_eRuntimeError, "not supported by this event");
1069 }
1070 if (UNDEF_P(trace_arg->data)) {
1071 rb_bug("rb_tracearg_object: unreachable");
1072 }
1073 return trace_arg->data;
1074}
1075
/* TracePoint#event: symbol of the current event (hook context only). */
static VALUE
tracepoint_attr_event(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_event(get_trace_arg());
}
1081
/* TracePoint#lineno: line number of the current event. */
static VALUE
tracepoint_attr_lineno(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_lineno(get_trace_arg());
}
/* TracePoint#path: source path of the current event. */
static VALUE
tracepoint_attr_path(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_path(get_trace_arg());
}
1092
/* TracePoint#parameters: parameter list of the traced call. */
static VALUE
tracepoint_attr_parameters(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_parameters(get_trace_arg());
}
1098
/* TracePoint#method_id: name of the method where the event fired. */
static VALUE
tracepoint_attr_method_id(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_method_id(get_trace_arg());
}
1104
/* TracePoint#callee_id: called (possibly aliased) method name. */
static VALUE
tracepoint_attr_callee_id(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_callee_id(get_trace_arg());
}
1110
/* TracePoint#defined_class: class/module defining the traced method. */
static VALUE
tracepoint_attr_defined_class(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_defined_class(get_trace_arg());
}
1116
/* TracePoint#binding: binding of the frame where the event fired. */
static VALUE
tracepoint_attr_binding(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_binding(get_trace_arg());
}
1122
/* TracePoint#self: receiver of the frame where the event fired. */
static VALUE
tracepoint_attr_self(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_self(get_trace_arg());
}
1128
/* TracePoint#return_value: value being returned (return events only). */
static VALUE
tracepoint_attr_return_value(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_return_value(get_trace_arg());
}
1134
/* TracePoint#raised_exception: exception object (raise events only). */
static VALUE
tracepoint_attr_raised_exception(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_raised_exception(get_trace_arg());
}
1140
/* TracePoint#eval_script: compiled source (script_compiled only). */
static VALUE
tracepoint_attr_eval_script(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_eval_script(get_trace_arg());
}
1146
/* TracePoint#instruction_sequence: ISeq (script_compiled only). */
static VALUE
tracepoint_attr_instruction_sequence(rb_execution_context_t *ec, VALUE tpval)
{
    return rb_tracearg_instruction_sequence(get_trace_arg());
}
1152
/*
 * Event-hook trampoline for TracePoint (registered with RAW_ARG).
 * C-level tracepoints call tp->func; Ruby-level ones call tp->proc,
 * but only on the ractor that created the TracePoint (tp->ractor is
 * NULL for unrestricted tracepoints).
 */
static void
tp_call_trace(VALUE tpval, rb_trace_arg_t *trace_arg)
{
    rb_tp_t *tp = tpptr(tpval);

    if (tp->func) {
        (*tp->func)(tpval, tp->data);
    }
    else {
        if (tp->ractor == NULL || tp->ractor == GET_RACTOR()) {
            rb_proc_call_with_block((VALUE)tp->proc, 1, &tpval, Qnil);
        }
    }
}
1167
1168VALUE
1170{
1171 rb_tp_t *tp;
1172 tp = tpptr(tpval);
1173
1174 if (tp->local_target_set != Qfalse) {
1175 rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
1176 }
1177
1178 if (tp->target_th) {
1179 rb_thread_add_event_hook2(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
1180 RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
1181 }
1182 else {
1183 rb_add_event_hook2((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
1184 RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
1185 }
1186 tp->tracing = 1;
1187 return Qundef;
1188}
1189
1190static const rb_iseq_t *
1191iseq_of(VALUE target)
1192{
1193 VALUE iseqv = rb_funcall(rb_cISeq, rb_intern("of"), 1, target);
1194 if (NIL_P(iseqv)) {
1195 rb_raise(rb_eArgError, "specified target is not supported");
1196 }
1197 else {
1198 return rb_iseqw_to_iseq(iseqv);
1199 }
1200}
1201
1202const rb_method_definition_t *rb_method_def(VALUE method); /* proc.c */
1203
/*
 * TracePoint#enable(target:, target_line:): enable the tracepoint only
 * for a specific method/proc (and optionally a specific line).  Hooks
 * are attached to the target's ISeq tree and, for bmethods, to the
 * method definition's own hook list.  Every touched target is recorded
 * in tp->local_target_set so TracePoint#disable can detach them.
 */
static VALUE
rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
{
    rb_tp_t *tp = tpptr(tpval);
    const rb_iseq_t *iseq = iseq_of(target);
    int n = 0;
    unsigned int line = 0;
    bool target_bmethod = false;

    if (tp->tracing > 0) {
        rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
    }

    if (!NIL_P(target_line)) {
        if ((tp->events & RUBY_EVENT_LINE) == 0) {
            rb_raise(rb_eArgError, "target_line is specified, but line event is not specified");
        }
        else {
            line = NUM2UINT(target_line);
        }
    }

    VM_ASSERT(tp->local_target_set == Qfalse);
    /* hidden identity hash: maps each hooked target to Qtrue (iseq)
     * or Qfalse (bmethod), consumed by disable_local_event_iseq_i() */
    tp->local_target_set = rb_obj_hide(rb_ident_hash_new());

    /* bmethod */
    if (rb_obj_is_method(target)) {
        rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
        if (def->type == VM_METHOD_TYPE_BMETHOD &&
            (tp->events & (RUBY_EVENT_CALL | RUBY_EVENT_RETURN))) {
            if (def->body.bmethod.hooks == NULL) {
                def->body.bmethod.hooks = ZALLOC(rb_hook_list_t);
            }
            rb_hook_list_connect_tracepoint(target, def->body.bmethod.hooks, tpval, 0);
            rb_hash_aset(tp->local_target_set, target, Qfalse);
            target_bmethod = true;

            n++;
        }
    }

    /* iseq */
    n += rb_iseq_add_local_tracepoint_recursively(iseq, tp->events, tpval, line, target_bmethod);
    rb_hash_aset(tp->local_target_set, (VALUE)iseq, Qtrue);

    if (n == 0) {
        rb_raise(rb_eArgError, "can not enable any hooks");
    }

    rb_yjit_tracing_invalidate_all();
    rb_mjit_tracing_invalidate_all(tp->events);

    ruby_vm_event_local_num++;

    tp->tracing = 1;

    return Qnil;
}
1263
1264static int
1265disable_local_event_iseq_i(VALUE target, VALUE iseq_p, VALUE tpval)
1266{
1267 if (iseq_p) {
1268 rb_iseq_remove_local_tracepoint_recursively((rb_iseq_t *)target, tpval);
1269 }
1270 else {
1271 /* bmethod */
1272 rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
1273 rb_hook_list_t *hooks = def->body.bmethod.hooks;
1274 VM_ASSERT(hooks != NULL);
1275 rb_hook_list_remove_tracepoint(hooks, tpval);
1276
1277 if (hooks->events == 0) {
1278 rb_hook_list_free(def->body.bmethod.hooks);
1279 def->body.bmethod.hooks = NULL;
1280 }
1281 }
1282 return ST_CONTINUE;
1283}
1284
1285VALUE
1287{
1288 rb_tp_t *tp;
1289
1290 tp = tpptr(tpval);
1291
1292 if (tp->local_target_set) {
1293 rb_hash_foreach(tp->local_target_set, disable_local_event_iseq_i, tpval);
1294 tp->local_target_set = Qfalse;
1295 ruby_vm_event_local_num--;
1296 }
1297 else {
1298 if (tp->target_th) {
1299 rb_thread_remove_event_hook_with_data(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tpval);
1300 }
1301 else {
1303 }
1304 }
1305 tp->tracing = 0;
1306 tp->target_th = NULL;
1307 return Qundef;
1308}
1309
1310void
1311rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line)
1312{
1313 rb_tp_t *tp = tpptr(tpval);
1314 rb_event_hook_t *hook = alloc_event_hook((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
1315 RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
1316 hook->filter.target_line = target_line;
1317 hook_list_connect(target, list, hook, FALSE);
1318}
1319
1320void
1321rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval)
1322{
1323 rb_event_hook_t *hook = list->hooks;
1324 rb_event_flag_t events = 0;
1325
1326 while (hook) {
1327 if (hook->data == tpval) {
1328 hook->hook_flags |= RUBY_EVENT_HOOK_FLAG_DELETED;
1329 list->need_clean = true;
1330 }
1331 else if ((hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) == 0) {
1332 events |= hook->events;
1333 }
1334 hook = hook->next;
1335 }
1336
1337 list->events = events;
1338}
1339
/*
 * Implementation of TracePoint#enable(target:, target_line:, target_thread:).
 * Returns whether the tracepoint was previously enabled; with a block,
 * yields and restores the previous enabled/disabled state afterwards.
 */
static VALUE
tracepoint_enable_m(rb_execution_context_t *ec, VALUE tpval, VALUE target, VALUE target_line, VALUE target_thread)
{
    rb_tp_t *tp = tpptr(tpval);
    int previous_tracing = tp->tracing;

    /* :default resolves to the current thread only when enabling with a
     * block and no code-object target; otherwise no thread filter */
    if (target_thread == sym_default) {
        if (rb_block_given_p() && NIL_P(target) && NIL_P(target_line)) {
            target_thread = rb_thread_current();
        }
        else {
            target_thread = Qnil;
        }
    }

    /* check target_thread */
    if (RTEST(target_thread)) {
        if (tp->target_th) {
            rb_raise(rb_eArgError, "can not override target_thread filter");
        }
        tp->target_th = rb_thread_ptr(target_thread);
    }
    else {
        tp->target_th = NULL;
    }

    if (NIL_P(target)) {
        if (!NIL_P(target_line)) {
            rb_raise(rb_eArgError, "only target_line is specified");
        }
        rb_tracepoint_enable(tpval);
    }
    else {
        rb_tracepoint_enable_for_target(tpval, target, target_line);
    }

    if (rb_block_given_p()) {
        /* restore the pre-call state even if the block raises */
        return rb_ensure(rb_yield, Qundef,
                         previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
                         tpval);
    }
    else {
        return RBOOL(previous_tracing);
    }
}
1385
1386static VALUE
1387tracepoint_disable_m(rb_execution_context_t *ec, VALUE tpval)
1388{
1389 rb_tp_t *tp = tpptr(tpval);
1390 int previous_tracing = tp->tracing;
1391
1392 if (rb_block_given_p()) {
1393 if (tp->local_target_set != Qfalse) {
1394 rb_raise(rb_eArgError, "can't disable a targeting TracePoint in a block");
1395 }
1396
1397 rb_tracepoint_disable(tpval);
1398 return rb_ensure(rb_yield, Qundef,
1399 previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
1400 tpval);
1401 }
1402 else {
1403 rb_tracepoint_disable(tpval);
1404 return RBOOL(previous_tracing);
1405 }
1406}
1407
1408VALUE
1410{
1411 rb_tp_t *tp = tpptr(tpval);
1412 return RBOOL(tp->tracing);
1413}
1414
1415static VALUE
1416tracepoint_enabled_p(rb_execution_context_t *ec, VALUE tpval)
1417{
1418 return rb_tracepoint_enabled_p(tpval);
1419}
1420
1421static VALUE
1422tracepoint_new(VALUE klass, rb_thread_t *target_th, rb_event_flag_t events, void (func)(VALUE, void*), void *data, VALUE proc)
1423{
1424 VALUE tpval = tp_alloc(klass);
1425 rb_tp_t *tp;
1426 TypedData_Get_Struct(tpval, rb_tp_t, &tp_data_type, tp);
1427
1428 tp->proc = proc;
1429 tp->ractor = rb_ractor_shareable_p(proc) ? NULL : GET_RACTOR();
1430 tp->func = func;
1431 tp->data = data;
1432 tp->events = events;
1433 tp->self = tpval;
1434
1435 return tpval;
1436}
1437
1438VALUE
1439rb_tracepoint_new(VALUE target_thval, rb_event_flag_t events, void (*func)(VALUE, void *), void *data)
1440{
1441 rb_thread_t *target_th = NULL;
1442
1443 if (RTEST(target_thval)) {
1444 target_th = rb_thread_ptr(target_thval);
1445 /* TODO: Test it!
1446 * Warning: This function is not tested.
1447 */
1448 }
1449 return tracepoint_new(rb_cTracePoint, target_th, events, func, data, Qundef);
1450}
1451
1452static VALUE
1453tracepoint_new_s(rb_execution_context_t *ec, VALUE self, VALUE args)
1454{
1455 rb_event_flag_t events = 0;
1456 long i;
1457 long argc = RARRAY_LEN(args);
1458
1459 if (argc > 0) {
1460 for (i=0; i<argc; i++) {
1461 events |= symbol2event_flag(RARRAY_AREF(args, i));
1462 }
1463 }
1464 else {
1466 }
1467
1468 if (!rb_block_given_p()) {
1469 rb_raise(rb_eArgError, "must be called with a block");
1470 }
1471
1472 return tracepoint_new(self, 0, events, 0, 0, rb_block_proc());
1473}
1474
1475static VALUE
1476tracepoint_trace_s(rb_execution_context_t *ec, VALUE self, VALUE args)
1477{
1478 VALUE trace = tracepoint_new_s(ec, self, args);
1479 rb_tracepoint_enable(trace);
1480 return trace;
1481}
1482
1483static VALUE
1484tracepoint_inspect(rb_execution_context_t *ec, VALUE self)
1485{
1486 rb_tp_t *tp = tpptr(self);
1487 rb_trace_arg_t *trace_arg = GET_EC()->trace_arg;
1488
1489 if (trace_arg) {
1490 switch (trace_arg->event) {
1491 case RUBY_EVENT_LINE:
1492 {
1493 VALUE sym = rb_tracearg_method_id(trace_arg);
1494 if (NIL_P(sym))
1495 break;
1496 return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE":%d in `%"PRIsVALUE"'>",
1497 rb_tracearg_event(trace_arg),
1498 rb_tracearg_path(trace_arg),
1499 FIX2INT(rb_tracearg_lineno(trace_arg)),
1500 sym);
1501 }
1502 case RUBY_EVENT_CALL:
1503 case RUBY_EVENT_C_CALL:
1504 case RUBY_EVENT_RETURN:
1506 return rb_sprintf("#<TracePoint:%"PRIsVALUE" `%"PRIsVALUE"' %"PRIsVALUE":%d>",
1507 rb_tracearg_event(trace_arg),
1508 rb_tracearg_method_id(trace_arg),
1509 rb_tracearg_path(trace_arg),
1510 FIX2INT(rb_tracearg_lineno(trace_arg)));
1513 return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE">",
1514 rb_tracearg_event(trace_arg),
1515 rb_tracearg_self(trace_arg));
1516 default:
1517 break;
1518 }
1519 return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE":%d>",
1520 rb_tracearg_event(trace_arg),
1521 rb_tracearg_path(trace_arg),
1522 FIX2INT(rb_tracearg_lineno(trace_arg)));
1523 }
1524 else {
1525 return rb_sprintf("#<TracePoint:%s>", tp->tracing ? "enabled" : "disabled");
1526 }
1527}
1528
1529static void
1530tracepoint_stat_event_hooks(VALUE hash, VALUE key, rb_event_hook_t *hook)
1531{
1532 int active = 0, deleted = 0;
1533
1534 while (hook) {
1535 if (hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) {
1536 deleted++;
1537 }
1538 else {
1539 active++;
1540 }
1541 hook = hook->next;
1542 }
1543
1544 rb_hash_aset(hash, key, rb_ary_new3(2, INT2FIX(active), INT2FIX(deleted)));
1545}
1546
1547static VALUE
1548tracepoint_stat_s(rb_execution_context_t *ec, VALUE self)
1549{
1550 rb_vm_t *vm = GET_VM();
1551 VALUE stat = rb_hash_new();
1552
1553 tracepoint_stat_event_hooks(stat, vm->self, rb_ec_ractor_hooks(ec)->hooks);
1554 /* TODO: thread local hooks */
1555
1556 return stat;
1557}
1558
1559static VALUE
1560disallow_reentry(VALUE val)
1561{
1562 rb_trace_arg_t *arg = (rb_trace_arg_t *)val;
1563 rb_execution_context_t *ec = GET_EC();
1564 if (ec->trace_arg != NULL) rb_bug("should be NULL, but %p", (void *)ec->trace_arg);
1565 ec->trace_arg = arg;
1566 return Qnil;
1567}
1568
1569static VALUE
1570tracepoint_allow_reentry(rb_execution_context_t *ec, VALUE self)
1571{
1572 const rb_trace_arg_t *arg = ec->trace_arg;
1573 if (arg == NULL) rb_raise(rb_eRuntimeError, "No need to allow reentrance.");
1574 ec->trace_arg = NULL;
1575 return rb_ensure(rb_yield, Qnil, disallow_reentry, (VALUE)arg);
1576}
1577
1578#include "trace_point.rbinc"
1579
/* This function is called from inits.c */
void
Init_vm_trace(void)
{
    /* :default sentinel for TracePoint#enable's target_thread keyword */
    sym_default = ID2SYM(rb_intern_const("default"));

    /* trace_func */
    rb_define_global_function("set_trace_func", set_trace_func, 1);
    rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
    rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);

    /* TracePoint instances are only created via .new/.trace, never
     * allocated directly */
    rb_cTracePoint = rb_define_class("TracePoint", rb_cObject);
    rb_undef_alloc_func(rb_cTracePoint);
}
1594
1597 void *data;
1599
1600#define MAX_POSTPONED_JOB 1000
1601#define MAX_POSTPONED_JOB_SPECIAL_ADDITION 24
1602
1604 struct ccan_list_node jnode; /* <=> vm->workqueue */
1606};
1607
1608// Used for VM memsize reporting. Returns the size of a list of rb_workqueue_job
1609// structs. Defined here because the struct definition lives here as well.
1610size_t
1611rb_vm_memsize_workqueue(struct ccan_list_head *workqueue)
1612{
1613 struct rb_workqueue_job *work = 0;
1614 size_t size = 0;
1615
1616 ccan_list_for_each(workqueue, work, jnode) {
1617 size += sizeof(struct rb_workqueue_job);
1618 }
1619
1620 return size;
1621}
1622
1623// Used for VM memsize reporting. Returns the total size of the postponed job
1624// buffer that was allocated at initialization.
1625size_t
1626rb_vm_memsize_postponed_job_buffer(void)
1627{
1628 return sizeof(rb_postponed_job_t) * MAX_POSTPONED_JOB;
1629}
1630
/* Allocates the fixed-size postponed-job buffer at VM boot time. */
void
Init_vm_postponed_job(void)
{
    rb_vm_t *vm = GET_VM();
    vm->postponed_job_buffer = ALLOC_N(rb_postponed_job_t, MAX_POSTPONED_JOB);
    vm->postponed_job_index = 0;
    /* workqueue is initialized when VM locks are initialized */
}
1639
/* Outcome of postponed_job_register(). */
enum postponed_job_register_result {
    PJRR_SUCCESS = 0,     /* job stored; interrupt flag set */
    PJRR_FULL = 1,        /* buffer full; job not stored */
    PJRR_INTERRUPTED = 2  /* lost the CAS race; caller should retry */
};
1645
/* Async-signal-safe */
/*
 * Tries to claim slot `expected_index` of vm->postponed_job_buffer with a
 * single compare-and-swap on vm->postponed_job_index; `max` bounds the
 * usable portion of the buffer.  On success, stores the job and requests a
 * flush via the postponed-job interrupt.  Returns PJRR_INTERRUPTED when a
 * concurrent registration won the race (caller re-reads the index and
 * retries).
 */
static enum postponed_job_register_result
postponed_job_register(rb_execution_context_t *ec, rb_vm_t *vm,
                       unsigned int flags, rb_postponed_job_func_t func, void *data, rb_atomic_t max, rb_atomic_t expected_index)
{
    rb_postponed_job_t *pjob;

    if (expected_index >= max) return PJRR_FULL; /* failed */

    if (ATOMIC_CAS(vm->postponed_job_index, expected_index, expected_index+1) == expected_index) {
        pjob = &vm->postponed_job_buffer[expected_index];
    }
    else {
        return PJRR_INTERRUPTED;
    }

    /* unused: pjob->flags = flags; */
    pjob->func = func;
    pjob->data = data;

    /* have rb_postponed_job_flush() run at the next interrupt check */
    RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec);

    return PJRR_SUCCESS;
}
1670
1672get_valid_ec(rb_vm_t *vm)
1673{
1674 rb_execution_context_t *ec = rb_current_execution_context(false);
1675 if (ec == NULL) ec = rb_vm_main_ractor_ec(vm);
1676 return ec;
1677}
1678
1679/*
1680 * return 0 if job buffer is full
1681 * Async-signal-safe
1682 */
1683int
1684rb_postponed_job_register(unsigned int flags, rb_postponed_job_func_t func, void *data)
1685{
1686 rb_vm_t *vm = GET_VM();
1687 rb_execution_context_t *ec = get_valid_ec(vm);
1688
1689 begin:
1690 switch (postponed_job_register(ec, vm, flags, func, data, MAX_POSTPONED_JOB, vm->postponed_job_index)) {
1691 case PJRR_SUCCESS : return 1;
1692 case PJRR_FULL : return 0;
1693 case PJRR_INTERRUPTED: goto begin;
1694 default: rb_bug("unreachable\n");
1695 }
1696}
1697
1698/*
1699 * return 0 if job buffer is full
1700 * Async-signal-safe
1701 */
1702int
1703rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
1704{
1705 rb_vm_t *vm = GET_VM();
1706 rb_execution_context_t *ec = get_valid_ec(vm);
1707 rb_postponed_job_t *pjob;
1708 rb_atomic_t i, index;
1709
1710 begin:
1711 index = vm->postponed_job_index;
1712 for (i=0; i<index; i++) {
1713 pjob = &vm->postponed_job_buffer[i];
1714 if (pjob->func == func) {
1715 RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec);
1716 return 2;
1717 }
1718 }
1719 switch (postponed_job_register(ec, vm, flags, func, data, MAX_POSTPONED_JOB + MAX_POSTPONED_JOB_SPECIAL_ADDITION, index)) {
1720 case PJRR_SUCCESS : return 1;
1721 case PJRR_FULL : return 0;
1722 case PJRR_INTERRUPTED: goto begin;
1723 default: rb_bug("unreachable\n");
1724 }
1725}
1726
1727/*
1728 * thread-safe and called from non-Ruby thread
1729 * returns FALSE on failure (ENOMEM), TRUE otherwise
1730 */
1731int
1732rb_workqueue_register(unsigned flags, rb_postponed_job_func_t func, void *data)
1733{
1734 struct rb_workqueue_job *wq_job = malloc(sizeof(*wq_job));
1735 rb_vm_t *vm = GET_VM();
1736
1737 if (!wq_job) return FALSE;
1738 wq_job->job.func = func;
1739 wq_job->job.data = data;
1740
1741 rb_nativethread_lock_lock(&vm->workqueue_lock);
1742 ccan_list_add_tail(&vm->workqueue, &wq_job->jnode);
1743 rb_nativethread_lock_unlock(&vm->workqueue_lock);
1744
1745 // TODO: current implementation affects only main ractor
1746 RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(rb_vm_main_ractor_ec(vm));
1747
1748 return TRUE;
1749}
1750
/*
 * Runs all pending postponed jobs plus any workqueue jobs queued from
 * non-Ruby threads.  Called at VM interrupt-check points; masks further
 * POSTPONED_JOB/TRAP interrupts while jobs run so they cannot recurse.
 */
void
rb_postponed_job_flush(rb_vm_t *vm)
{
    rb_execution_context_t *ec = GET_EC();
    const rb_atomic_t block_mask = POSTPONED_JOB_INTERRUPT_MASK|TRAP_INTERRUPT_MASK;
    volatile rb_atomic_t saved_mask = ec->interrupt_mask & block_mask;
    VALUE volatile saved_errno = ec->errinfo;  /* NOTE: holds errinfo despite the name */
    struct ccan_list_head tmp;

    ccan_list_head_init(&tmp);

    /* steal the whole workqueue under the lock, then run it unlocked */
    rb_nativethread_lock_lock(&vm->workqueue_lock);
    ccan_list_append_list(&tmp, &vm->workqueue);
    rb_nativethread_lock_unlock(&vm->workqueue_lock);

    ec->errinfo = Qnil;
    /* mask POSTPONED_JOB dispatch */
    ec->interrupt_mask |= block_mask;
    {
        /* tag-protect so a raising job cannot skip EC_POP_TAG / cleanup */
        EC_PUSH_TAG(ec);
        if (EC_EXEC_TAG() == TAG_NONE) {
            rb_atomic_t index;
            struct rb_workqueue_job *wq_job;

            /* pop from the async-signal-safe buffer via CAS, newest first */
            while ((index = vm->postponed_job_index) > 0) {
                if (ATOMIC_CAS(vm->postponed_job_index, index, index-1) == index) {
                    rb_postponed_job_t *pjob = &vm->postponed_job_buffer[index-1];
                    (*pjob->func)(pjob->data);
                }
            }
            while ((wq_job = ccan_list_pop(&tmp, struct rb_workqueue_job, jnode))) {
                rb_postponed_job_t pjob = wq_job->job;

                /* copy the job out so the node can be freed before running it */
                free(wq_job);
                (pjob.func)(pjob.data);
            }
        }
        EC_POP_TAG();
    }
    /* restore POSTPONED_JOB mask */
    ec->interrupt_mask &= ~(saved_mask ^ block_mask);
    ec->errinfo = saved_errno;

    /* don't leak memory if a job threw an exception */
    if (!ccan_list_empty(&tmp)) {
        rb_nativethread_lock_lock(&vm->workqueue_lock);
        ccan_list_prepend_list(&vm->workqueue, &tmp);
        rb_nativethread_lock_unlock(&vm->workqueue_lock);

        RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());
    }
}
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_global_function(mid, func, arity)
Defines rb_mKernel #mid.
VALUE rb_tracearg_binding(rb_trace_arg_t *trace_arg)
Creates a binding object of the point where the trace is at.
Definition vm_trace.c:956
VALUE rb_tracepoint_enabled_p(VALUE tpval)
Queries if the passed TracePoint is up and running.
Definition vm_trace.c:1409
VALUE rb_tracearg_object(rb_trace_arg_t *trace_arg)
Queries the allocated/deallocated object that the trace represents.
Definition vm_trace.c:1062
VALUE rb_tracearg_callee_id(rb_trace_arg_t *trace_arg)
Identical to rb_tracearg_method_id(), except it returns callee id like rb_frame_callee().
Definition vm_trace.c:942
VALUE rb_tracearg_defined_class(rb_trace_arg_t *trace_arg)
Queries the class that defines the method that the passed trace is at.
Definition vm_trace.c:949
VALUE rb_tracepoint_new(VALUE target_thread_not_supported_yet, rb_event_flag_t events, void(*func)(VALUE, void *), void *data)
Creates a tracepoint by registering a callback function for one or more tracepoint events.
Definition vm_trace.c:1439
VALUE rb_tracearg_raised_exception(rb_trace_arg_t *trace_arg)
Queries the raised exception that the trace represents.
Definition vm_trace.c:996
void rb_thread_add_event_hook(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
Identical to rb_add_event_hook(), except its effect is limited to the passed thread.
Definition vm_trace.c:179
VALUE rb_tracepoint_disable(VALUE tpval)
Stops (disables) an already running instance of TracePoint.
Definition vm_trace.c:1286
VALUE rb_tracearg_self(rb_trace_arg_t *trace_arg)
Queries the receiver of the point trace is at.
Definition vm_trace.c:975
int rb_thread_remove_event_hook(VALUE thval, rb_event_hook_func_t func)
Identical to rb_remove_event_hook(), except it additionally takes a thread argument.
Definition vm_trace.c:281
int rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
Identical to rb_postponed_job_register_one(), except it additionally checks for duplicated registrati...
Definition vm_trace.c:1703
VALUE rb_tracearg_return_value(rb_trace_arg_t *trace_arg)
Queries the return value that the trace represents.
Definition vm_trace.c:981
rb_event_flag_t rb_tracearg_event_flag(rb_trace_arg_t *trace_arg)
Queries the event of the passed trace.
Definition vm_trace.c:841
VALUE rb_tracearg_path(rb_trace_arg_t *trace_arg)
Queries the file name of the point where the trace is at.
Definition vm_trace.c:867
int rb_thread_remove_event_hook_with_data(VALUE thval, rb_event_hook_func_t func, VALUE data)
Identical to rb_thread_remove_event_hook(), except it additionally takes the data argument.
Definition vm_trace.c:287
VALUE rb_tracepoint_enable(VALUE tpval)
Starts (enables) trace(s) defined by the passed object.
Definition vm_trace.c:1169
int rb_postponed_job_register(unsigned int flags, rb_postponed_job_func_t func, void *data)
Registers a postponed job.
Definition vm_trace.c:1684
VALUE rb_tracearg_method_id(rb_trace_arg_t *trace_arg)
Queries the method name of the point where the trace is at.
Definition vm_trace.c:935
int rb_remove_event_hook_with_data(rb_event_hook_func_t func, VALUE data)
Identical to rb_remove_event_hook(), except it additionally takes the data argument.
Definition vm_trace.c:299
rb_trace_arg_t * rb_tracearg_from_tracepoint(VALUE tpval)
Queries the current event of the passed tracepoint.
Definition vm_trace.c:835
VALUE rb_tracearg_lineno(rb_trace_arg_t *trace_arg)
Queries the line of the point where the trace is at.
Definition vm_trace.c:861
void(* rb_postponed_job_func_t)(void *arg)
Type of postponed jobs.
Definition debug.h:608
VALUE rb_tracearg_event(rb_trace_arg_t *trace_arg)
Identical to rb_tracearg_event_flag(), except it returns the name of the event in Ruby's symbol.
Definition vm_trace.c:847
#define RUBY_EVENT_END
Encountered an end of a class clause.
Definition event.h:36
#define RUBY_EVENT_C_CALL
A method, written in C, is called.
Definition event.h:39
#define RUBY_EVENT_TRACEPOINT_ALL
Bitmask of extended events.
Definition event.h:57
void rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
Registers an event hook function.
Definition vm_trace.c:185
#define RUBY_EVENT_RAISE
Encountered a raise statement.
Definition event.h:41
#define RUBY_EVENT_B_RETURN
Encountered a next statement.
Definition event.h:52
#define RUBY_EVENT_SCRIPT_COMPILED
Encountered an eval.
Definition event.h:56
#define RUBY_INTERNAL_EVENT_MASK
Bitmask of internal events.
Definition event.h:96
int rb_remove_event_hook(rb_event_hook_func_t func)
Removes the passed function from the list of event hooks.
Definition vm_trace.c:293
#define RUBY_EVENT_ALL
Bitmask of traditional events.
Definition event.h:42
#define RUBY_EVENT_THREAD_BEGIN
Encountered a new thread.
Definition event.h:53
#define RUBY_EVENT_CLASS
Encountered a new class.
Definition event.h:35
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Type of event hooks.
Definition event.h:115
#define RUBY_EVENT_LINE
Encountered a new line.
Definition event.h:34
#define RUBY_EVENT_RETURN
Encountered a return statement.
Definition event.h:38
#define RUBY_EVENT_C_RETURN
Return from a method, written in C.
Definition event.h:40
#define RUBY_EVENT_B_CALL
Encountered an yield statement.
Definition event.h:51
#define RUBY_INTERNAL_EVENT_FREEOBJ
Object swept.
Definition event.h:89
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:103
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
Definition event.h:37
#define RUBY_INTERNAL_EVENT_NEWOBJ
Object allocated.
Definition event.h:88
#define RUBY_EVENT_THREAD_END
Encountered an end of a thread.
Definition event.h:54
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition class.c:923
int rb_block_given_p(void)
Determines if the current method is given a block.
Definition eval.c:868
#define rb_str_new2
Old name of rb_str_new_cstr.
Definition string.h:1675
#define FL_SINGLETON
Old name of RUBY_FL_SINGLETON.
Definition fl_type.h:58
#define ALLOC
Old name of RB_ALLOC.
Definition memory.h:394
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define ZALLOC
Old name of RB_ZALLOC.
Definition memory.h:396
#define FIX2INT
Old name of RB_FIX2INT.
Definition int.h:41
#define NUM2UINT
Old name of RB_NUM2UINT.
Definition int.h:45
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition value_type.h:66
#define ALLOC_N
Old name of RB_ALLOC_N.
Definition memory.h:393
#define rb_ary_new3
Old name of rb_ary_new_from_args.
Definition array.h:652
#define Qtrue
Old name of RUBY_Qtrue.
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define NIL_P
Old name of RB_NIL_P.
#define FL_TEST
Old name of RB_FL_TEST.
Definition fl_type.h:139
void rb_raise(VALUE exc, const char *fmt,...)
Exception entry point.
Definition error.c:3150
void rb_bug(const char *fmt,...)
Interpreter panic switch.
Definition error.c:794
VALUE rb_eTypeError
TypeError exception.
Definition error.c:1091
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1089
VALUE rb_eArgError
ArgumentError exception.
Definition error.c:1092
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition object.c:84
VALUE rb_cThread
Thread class.
Definition vm.c:466
#define RB_OBJ_WRITTEN(old, oldv, young)
Identical to RB_OBJ_WRITE(), except it doesn't write any values, but only a WB declaration.
Definition rgengc.h:232
VALUE rb_funcall(VALUE recv, ID mid, int n,...)
Calls a method.
Definition vm_eval.c:1102
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
Definition proc.c:848
VALUE rb_proc_call_with_block(VALUE recv, int argc, const VALUE *argv, VALUE proc)
Identical to rb_proc_call(), except you can additionally pass another proc object,...
Definition proc.c:1027
VALUE rb_obj_is_method(VALUE recv)
Queries if the given object is a method.
Definition proc.c:1637
VALUE rb_binding_new(void)
Snapshots the current execution context and turn it into an instance of rb_cBinding.
Definition proc.c:385
VALUE rb_obj_is_proc(VALUE recv)
Queries if the given object is a proc.
Definition proc.c:175
VALUE rb_thread_current(void)
Obtains the "current" thread.
Definition thread.c:2793
VALUE rb_ivar_get(VALUE obj, ID name)
Identical to rb_iv_get(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1218
void rb_undef_alloc_func(VALUE klass)
Deletes the allocator function of a class.
Definition vm_method.c:1142
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
Definition symbol.h:276
ID rb_intern(const char *name)
Finds or creates a symbol of the given name.
Definition symbol.c:789
VALUE rb_sym2str(VALUE id)
Identical to rb_id2str(), except it takes an instance of rb_cSymbol rather than an ID.
Definition symbol.c:942
static bool rb_ractor_shareable_p(VALUE obj)
Queries if multiple Ractors can share the passed object or not.
Definition ractor.h:249
VALUE rb_sprintf(const char *fmt,...)
Ruby's extended sprintf(3).
Definition sprintf.c:1219
VALUE rb_yield(VALUE val)
Yields the block.
Definition vm_eval.c:1357
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:161
void rb_hash_foreach(VALUE q, int_type *w, VALUE e)
Iteration over the given hash.
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of ensure clause.
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:68
#define RARRAY_AREF(a, i)
Definition rarray.h:583
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
Definition rtypeddata.h:79
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
Definition rtypeddata.h:507
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of takin...
Definition rtypeddata.h:489
#define RTEST
This is an old name of RB_TEST.
This is the struct that holds necessary info for a struct.
Definition rtypeddata.h:190
Definition method.h:54
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Blocks until the current thread obtains a lock.
Definition thread.c:299
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Releases a lock.
Definition thread.c:305
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:375