Ruby 3.2.3p157 (2024-01-18 revision 52bb2ac0a6971d0391efa2275f7a66bff319087c)
vm_callinfo.h
#ifndef RUBY_VM_CALLINFO_H                               /*-*-C-*-vi:se ft=c:*/
#define RUBY_VM_CALLINFO_H

#include "debug_counter.h"
#include "internal/class.h"
#include "shape.h"

enum vm_call_flag_bits {
    VM_CALL_ARGS_SPLAT_bit,     /* m(*args) */
    VM_CALL_ARGS_BLOCKARG_bit,  /* m(&block) */
    VM_CALL_FCALL_bit,          /* m(...) */
    VM_CALL_VCALL_bit,          /* m */
    VM_CALL_ARGS_SIMPLE_bit,    /* (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL */
    VM_CALL_BLOCKISEQ_bit,      /* has blockiseq */
    VM_CALL_KWARG_bit,          /* has kwarg */
    VM_CALL_KW_SPLAT_bit,       /* m(**opts) */
    VM_CALL_TAILCALL_bit,       /* located at tail position */
    VM_CALL_SUPER_bit,          /* super */
    VM_CALL_ZSUPER_bit,         /* zsuper */
    VM_CALL_OPT_SEND_bit,       /* internal flag */
    VM_CALL_KW_SPLAT_MUT_bit,   /* kw splat hash can be modified (to avoid allocating a new one) */
    VM_CALL__END
};

#define VM_CALL_ARGS_SPLAT      (0x01 << VM_CALL_ARGS_SPLAT_bit)
#define VM_CALL_ARGS_BLOCKARG   (0x01 << VM_CALL_ARGS_BLOCKARG_bit)
#define VM_CALL_FCALL           (0x01 << VM_CALL_FCALL_bit)
#define VM_CALL_VCALL           (0x01 << VM_CALL_VCALL_bit)
#define VM_CALL_ARGS_SIMPLE     (0x01 << VM_CALL_ARGS_SIMPLE_bit)
#define VM_CALL_BLOCKISEQ       (0x01 << VM_CALL_BLOCKISEQ_bit)
#define VM_CALL_KWARG           (0x01 << VM_CALL_KWARG_bit)
#define VM_CALL_KW_SPLAT        (0x01 << VM_CALL_KW_SPLAT_bit)
#define VM_CALL_TAILCALL        (0x01 << VM_CALL_TAILCALL_bit)
#define VM_CALL_SUPER           (0x01 << VM_CALL_SUPER_bit)
#define VM_CALL_ZSUPER          (0x01 << VM_CALL_ZSUPER_bit)
#define VM_CALL_OPT_SEND        (0x01 << VM_CALL_OPT_SEND_bit)
#define VM_CALL_KW_SPLAT_MUT    (0x01 << VM_CALL_KW_SPLAT_MUT_bit)
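
/* These bits are OR'ed together into a CI's flag word. Illustrative (not
 * exhaustive): a receiver-less call `m(*args)` would carry
 * (VM_CALL_FCALL | VM_CALL_ARGS_SPLAT), while a bare `m` that might also
 * be a local variable carries (VM_CALL_FCALL | VM_CALL_VCALL). The exact
 * combination for each call site is chosen by the compiler (compile.c). */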

struct rb_callinfo_kwarg {
    int keyword_len;
    VALUE keywords[];
};

static inline size_t
rb_callinfo_kwarg_bytes(int keyword_len)
{
    return rb_size_mul_add_or_raise(
        keyword_len,
        sizeof(VALUE),
        sizeof(struct rb_callinfo_kwarg),
        rb_eRuntimeError);
}
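
/* Because `keywords` is a flexible array member, a kwarg carrying `n`
 * keywords occupies sizeof(struct rb_callinfo_kwarg) + n * sizeof(VALUE)
 * bytes; rb_size_mul_add_or_raise computes exactly that, raising
 * rb_eRuntimeError instead of overflowing. A caller might allocate one
 * roughly like this (sketch only; real allocation sites are in compile.c):
 *
 *   struct rb_callinfo_kwarg *kw = ruby_xmalloc(rb_callinfo_kwarg_bytes(n));
 *   kw->keyword_len = n;
 */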

// imemo_callinfo
struct rb_callinfo {
    VALUE flags;
    const struct rb_callinfo_kwarg *kwarg;
    VALUE mid;
    VALUE flag;
    VALUE argc;
};
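
/* A `const struct rb_callinfo *` has two representations: a real pointer to
 * a heap-allocated imemo with the layout above, or (when USE_EMBED_CI is
 * enabled) a "packed" tagged word with the low bit set that encodes mid,
 * flag and argc inline, avoiding any allocation. The accessors below check
 * vm_ci_packed_p() first to pick the right decoding. */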

#ifndef USE_EMBED_CI
#define USE_EMBED_CI 1
#endif

#if SIZEOF_VALUE == 8
#define CI_EMBED_TAG_bits   1
#define CI_EMBED_ARGC_bits 15
#define CI_EMBED_FLAG_bits 16
#define CI_EMBED_ID_bits   32
#elif SIZEOF_VALUE == 4
#define CI_EMBED_TAG_bits   1
#define CI_EMBED_ARGC_bits  3
#define CI_EMBED_FLAG_bits 13
#define CI_EMBED_ID_bits   15
#endif

#if (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits + CI_EMBED_ID_bits) != (SIZEOF_VALUE * 8)
#error "CI_EMBED_* bit widths must sum to the width of VALUE"
#endif

#define CI_EMBED_FLAG 0x01
#define CI_EMBED_ARGC_SHFT (CI_EMBED_TAG_bits)
#define CI_EMBED_ARGC_MASK ((((VALUE)1)<<CI_EMBED_ARGC_bits) - 1)
#define CI_EMBED_FLAG_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits)
#define CI_EMBED_FLAG_MASK ((((VALUE)1)<<CI_EMBED_FLAG_bits) - 1)
#define CI_EMBED_ID_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits)
#define CI_EMBED_ID_MASK ((((VALUE)1)<<CI_EMBED_ID_bits) - 1)

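/* Illustrative layout of a packed CI on a 64-bit VALUE, following the
 * shift/mask definitions above:
 *
 *   bit  0      tag (always 1; coincides with RUBY_FIXNUM_FLAG, so a
 *               packed CI can never be mistaken for a real pointer)
 *   bits 1-15   argc
 *   bits 16-31  flag
 *   bits 32-63  mid (the method ID)
 *
 * e.g. vm_ci_mid() recovers the ID as
 * ((VALUE)ci >> CI_EMBED_ID_SHFT) & CI_EMBED_ID_MASK. */
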
static inline bool
vm_ci_packed_p(const struct rb_callinfo *ci)
{
#if USE_EMBED_CI
    if (LIKELY(((VALUE)ci) & 0x01)) {
        return 1;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
        return 0;
    }
#else
    return 0;
#endif
}

static inline bool
vm_ci_p(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci) || IMEMO_TYPE_P(ci, imemo_callinfo)) {
        return 1;
    }
    else {
        return 0;
    }
}

static inline ID
vm_ci_mid(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (((VALUE)ci) >> CI_EMBED_ID_SHFT) & CI_EMBED_ID_MASK;
    }
    else {
        return (ID)ci->mid;
    }
}

static inline unsigned int
vm_ci_flag(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (unsigned int)((((VALUE)ci) >> CI_EMBED_FLAG_SHFT) & CI_EMBED_FLAG_MASK);
    }
    else {
        return (unsigned int)ci->flag;
    }
}

static inline unsigned int
vm_ci_argc(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (unsigned int)((((VALUE)ci) >> CI_EMBED_ARGC_SHFT) & CI_EMBED_ARGC_MASK);
    }
    else {
        return (unsigned int)ci->argc;
    }
}

static inline const struct rb_callinfo_kwarg *
vm_ci_kwarg(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return NULL;
    }
    else {
        return ci->kwarg;
    }
}

static inline void
vm_ci_dump(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        ruby_debug_printf("packed_ci ID:%s flag:%x argc:%u\n",
                          rb_id2name(vm_ci_mid(ci)), vm_ci_flag(ci), vm_ci_argc(ci));
    }
    else {
        rp(ci);
    }
}

#define vm_ci_new(mid, flag, argc, kwarg) vm_ci_new_(mid, flag, argc, kwarg, __FILE__, __LINE__)
#define vm_ci_new_runtime(mid, flag, argc, kwarg) vm_ci_new_runtime_(mid, flag, argc, kwarg, __FILE__, __LINE__)

/* This is passed to STATIC_ASSERT. Cannot be an inline function. */
#define VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg) \
    (((mid ) & ~CI_EMBED_ID_MASK)   ? false : \
     ((flag) & ~CI_EMBED_FLAG_MASK) ? false : \
     ((argc) & ~CI_EMBED_ARGC_MASK) ? false : \
     (kwarg)                        ? false : true)

#define vm_ci_new_id(mid, flag, argc, must_zero) \
    ((const struct rb_callinfo *)                \
     ((((VALUE)(mid )) << CI_EMBED_ID_SHFT)   |  \
      (((VALUE)(flag)) << CI_EMBED_FLAG_SHFT) |  \
      (((VALUE)(argc)) << CI_EMBED_ARGC_SHFT) |  \
      RUBY_FIXNUM_FLAG))
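
/* Packing and unpacking round-trip for any embeddable combination, e.g.
 * (sketch):
 *
 *   const struct rb_callinfo *ci = vm_ci_new_id(mid, flag, argc, 0);
 *   VM_ASSERT(vm_ci_packed_p(ci));
 *   VM_ASSERT(vm_ci_mid(ci) == mid && vm_ci_argc(ci) == argc);
 *
 * The `must_zero` argument is unused in the expansion; it documents that
 * only a NULL kwarg may be embedded (VM_CI_EMBEDDABLE_P enforces this). */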

static inline const struct rb_callinfo *
vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
#if USE_EMBED_CI
    if (VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg)) {
        RB_DEBUG_COUNTER_INC(ci_packed);
        return vm_ci_new_id(mid, flag, argc, kwarg);
    }
#endif

    const bool debug = 0;
    if (debug) ruby_debug_printf("%s:%d ", file, line);

    // TODO: dedup
    const struct rb_callinfo *ci = (const struct rb_callinfo *)
        rb_imemo_new(imemo_callinfo,
                     (VALUE)mid,
                     (VALUE)flag,
                     (VALUE)argc,
                     (VALUE)kwarg);
    if (debug) rp(ci);
    if (kwarg) {
        RB_DEBUG_COUNTER_INC(ci_kw);
    }
    else {
        RB_DEBUG_COUNTER_INC(ci_nokw);
    }

    VM_ASSERT(vm_ci_flag(ci) == flag);
    VM_ASSERT(vm_ci_argc(ci) == argc);

    return ci;
}

static inline const struct rb_callinfo *
vm_ci_new_runtime_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
    RB_DEBUG_COUNTER_INC(ci_runtime);
    return vm_ci_new_(mid, flag, argc, kwarg, file, line);
}

#define VM_CALLINFO_NOT_UNDER_GC IMEMO_FL_USER0

static inline bool
vm_ci_markable(const struct rb_callinfo *ci)
{
    if (! ci) {
        return false; /* or true? This is Qfalse... */
    }
    else if (vm_ci_packed_p(ci)) {
        return true;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
        return ! FL_ANY_RAW((VALUE)ci, VM_CALLINFO_NOT_UNDER_GC);
    }
}

#define VM_CI_ON_STACK(mid_, flags_, argc_, kwarg_) \
    (struct rb_callinfo) {                          \
        .flags = T_IMEMO |                          \
            (imemo_callinfo << FL_USHIFT) |         \
            VM_CALLINFO_NOT_UNDER_GC,               \
        .mid   = mid_,                              \
        .flag  = flags_,                            \
        .argc  = argc_,                             \
        .kwarg = kwarg_,                            \
    }
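
/* VM_CI_ON_STACK expands to a compound literal whose header already has
 * VM_CALLINFO_NOT_UNDER_GC set, so a short-lived CI can be built on the C
 * stack without the GC ever treating it as a heap object, e.g. (sketch):
 *
 *   struct rb_callinfo ci = VM_CI_ON_STACK(mid, flag, argc, NULL);
 *
 * vm_ci_markable() returns false for such a CI. */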

typedef VALUE (*vm_call_handler)(
    struct rb_execution_context_struct *ec,
    struct rb_control_frame_struct *cfp,
    struct rb_calling_info *calling);

// imemo_callcache

struct rb_callcache {
    const VALUE flags;

    /* inline cache: key */
    const VALUE klass; // don't mark this field: marking it would keep klass
                       // from ever being freed. When klass is collected,
                       // cc will be cleared (cc->klass = 0) at vm_ccs_free().

    /* inline cache: values */
    const struct rb_callable_method_entry_struct * const cme_;
    const vm_call_handler call_;

    union {
        struct {
            uintptr_t value; // Shape ID in upper bits, index in lower bits
        } attr;
        const enum method_missing_reason method_missing_reason; /* used by method_missing */
        VALUE v;
    } aux_;
};
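
/* Only one member of aux_ is meaningful at a time, depending on what the
 * cache resolves to: attribute reader/writer caches pack a shape ID and an
 * ivar index into attr.value, while a cache that dispatches to
 * method_missing records the lookup failure in method_missing_reason. */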

#define VM_CALLCACHE_UNMARKABLE FL_FREEZE
#define VM_CALLCACHE_ON_STACK   FL_EXIVAR

extern const struct rb_callcache *rb_vm_empty_cc(void);
extern const struct rb_callcache *rb_vm_empty_cc_for_super(void);

#define vm_cc_empty() rb_vm_empty_cc()

static inline void vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id);

static inline void
vm_cc_attr_index_initialize(const struct rb_callcache *cc, shape_id_t shape_id)
{
    vm_cc_attr_index_set(cc, (attr_index_t)-1, shape_id);
}

static inline const struct rb_callcache *
vm_cc_new(VALUE klass,
          const struct rb_callable_method_entry_struct *cme,
          vm_call_handler call)
{
    const struct rb_callcache *cc = (const struct rb_callcache *)
        rb_imemo_new(imemo_callcache, (VALUE)cme, (VALUE)call, 0, klass);
    vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
    RB_DEBUG_COUNTER_INC(cc_new);
    return cc;
}

#define VM_CC_ON_STACK(clazz, call, aux, cme) \
    (struct rb_callcache) {                   \
        .flags = T_IMEMO |                    \
            (imemo_callcache << FL_USHIFT) |  \
            VM_CALLCACHE_UNMARKABLE |         \
            VM_CALLCACHE_ON_STACK,            \
        .klass = clazz,                       \
        .cme_  = cme,                         \
        .call_ = call,                        \
        .aux_  = aux,                         \
    }

static inline bool
vm_cc_class_check(const struct rb_callcache *cc, VALUE klass)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->klass == 0 ||
              RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
    return cc->klass == klass;
}

static inline int
vm_cc_markable(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return FL_TEST_RAW((VALUE)cc, VM_CALLCACHE_UNMARKABLE) == 0;
}

static inline const struct rb_callable_method_entry_struct *
vm_cc_cme(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->call_ == NULL || // not initialized yet
              !vm_cc_markable(cc) ||
              cc->cme_ != NULL);

    return cc->cme_;
}

static inline vm_call_handler
vm_cc_call(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->call_ != NULL);
    return cc->call_;
}

static inline attr_index_t
vm_cc_attr_index(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return (attr_index_t)((cc->aux_.attr.value & SHAPE_FLAG_MASK) - 1);
}

static inline shape_id_t
vm_cc_attr_index_dest_shape_id(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));

    return cc->aux_.attr.value >> SHAPE_FLAG_SHIFT;
}
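
/* attr.value packs both halves of an attribute cache into one word,
 * mirroring vm_cc_attr_index_set() below (illustrative):
 *
 *   value = ((uintptr_t)dest_shape_id << SHAPE_FLAG_SHIFT) | (index + 1)
 *
 * The index is stored off by one so that 0 can mean "nothing cached";
 * vm_cc_attr_index() undoes the +1. Reading attr.value once therefore
 * yields a mutually consistent (shape_id, index) pair. */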

static inline void
vm_cc_atomic_shape_and_index(const struct rb_callcache *cc, shape_id_t *shape_id, attr_index_t *index)
{
    uintptr_t cache_value = cc->aux_.attr.value; // Atomically read 64 bits
    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
    return;
}

static inline void
vm_ic_atomic_shape_and_index(const struct iseq_inline_iv_cache_entry *ic, shape_id_t *shape_id, attr_index_t *index)
{
    uintptr_t cache_value = ic->value; // Atomically read 64 bits
    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
    return;
}

static inline shape_id_t
vm_ic_attr_index_dest_shape_id(const struct iseq_inline_iv_cache_entry *ic)
{
    return (shape_id_t)(ic->value >> SHAPE_FLAG_SHIFT);
}

static inline unsigned int
vm_cc_cmethod_missing_reason(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return cc->aux_.method_missing_reason;
}

static inline bool
vm_cc_invalidated_p(const struct rb_callcache *cc)
{
    if (cc->klass && !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc))) {
        return false;
    }
    else {
        return true;
    }
}

// For MJIT. cc_cme is supposed to be the already-inlined result of vm_cc_cme(cc).
static inline bool
vm_cc_valid_p(const struct rb_callcache *cc, const rb_callable_method_entry_t *cc_cme, VALUE klass)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    if (cc->klass == klass && !METHOD_ENTRY_INVALIDATED(cc_cme)) {
        return 1;
    }
    else {
        return 0;
    }
}

/* callcache: mutate */

static inline void
vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(vm_call_handler *)&cc->call_ = call;
}

static inline void
vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id)
{
    uintptr_t *attr_value = (uintptr_t *)&cc->aux_.attr.value;
    if (!vm_cc_markable(cc)) {
        *attr_value = (uintptr_t)INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT;
        return;
    }
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *attr_value = (attr_index_t)(index + 1) | ((uintptr_t)(dest_shape_id) << SHAPE_FLAG_SHIFT);
}

static inline void
vm_ic_attr_index_set(const rb_iseq_t *iseq, const struct iseq_inline_iv_cache_entry *ic, attr_index_t index, shape_id_t dest_shape_id)
{
    *(uintptr_t *)&ic->value = ((uintptr_t)dest_shape_id << SHAPE_FLAG_SHIFT) | (attr_index_t)(index + 1);
}

static inline void
vm_ic_attr_index_initialize(const struct iseq_inline_iv_cache_entry *ic, shape_id_t shape_id)
{
    *(uintptr_t *)&ic->value = (uintptr_t)shape_id << SHAPE_FLAG_SHIFT;
}

static inline void
vm_cc_method_missing_reason_set(const struct rb_callcache *cc, enum method_missing_reason reason)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(enum method_missing_reason *)&cc->aux_.method_missing_reason = reason;
}

static inline void
vm_cc_invalidate(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    VM_ASSERT(cc->klass != 0); // should still be valid here

    *(VALUE *)&cc->klass = 0;
    RB_DEBUG_COUNTER_INC(cc_ent_invalidate);
}
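
/* Invalidation only clears the cache key: with cc->klass == 0,
 * vm_cc_class_check() can never match again and vm_cc_invalidated_p()
 * returns true. cme_ and call_ are left intact for any frame still
 * holding a reference to this cc. */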

/* calldata */

struct rb_call_data {
    const struct rb_callinfo *ci;
    const struct rb_callcache *cc;
};

struct rb_class_cc_entries {
#if VM_CHECK_MODE > 0
    VALUE debug_sig;
#endif
    int capa;
    int len;
    const struct rb_callable_method_entry_struct *cme;
    struct rb_class_cc_entries_entry {
        const struct rb_callinfo *ci;
        const struct rb_callcache *cc;
    } *entries;
};
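
/* One rb_class_cc_entries holds, for a single callable method entry (cme),
 * a growable array (len entries used out of capa) of (ci, cc) pairs: one
 * call cache per distinct call shape seen for that method. It is released
 * by rb_vm_ccs_free() in gc.c, declared at the end of this header. */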

#if VM_CHECK_MODE > 0

const rb_callable_method_entry_t *rb_vm_lookup_overloaded_cme(const rb_callable_method_entry_t *cme);
void rb_vm_dump_overloaded_cme_table(void);

static inline bool
vm_ccs_p(const struct rb_class_cc_entries *ccs)
{
    return ccs->debug_sig == ~(VALUE)ccs;
}

static inline bool
vm_cc_check_cme(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme)
{
    if (vm_cc_cme(cc) == cme ||
        (cme->def->iseq_overload && vm_cc_cme(cc) == rb_vm_lookup_overloaded_cme(cme))) {
        return true;
    }
    else {
#if 1
        // debug print

        fprintf(stderr, "iseq_overload:%d\n", (int)cme->def->iseq_overload);
        rp(cme);
        rp(vm_cc_cme(cc));
        rb_vm_lookup_overloaded_cme(cme);
#endif
        return false;
    }
}

#endif

// gc.c
void rb_vm_ccs_free(struct rb_class_cc_entries *ccs);

#endif /* RUBY_VM_CALLINFO_H */