Ruby 3.3.6p108 (2024-11-05 revision 75015d4c1f6965b5e85e96fb309f1f2129f933c0)
vm_callinfo.h
#ifndef RUBY_VM_CALLINFO_H /*-*-C-*-vi:se ft=c:*/
#define RUBY_VM_CALLINFO_H

#include "debug_counter.h"
#include "internal/class.h"
#include "shape.h"

enum vm_call_flag_bits {
    VM_CALL_ARGS_SPLAT_bit,     // m(*args)
    VM_CALL_ARGS_BLOCKARG_bit,  // m(&block)
    VM_CALL_FCALL_bit,          // m(args)   # receiver is self
    VM_CALL_VCALL_bit,          // m         # method call that looks like a local variable
    VM_CALL_ARGS_SIMPLE_bit,    // !(ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL
    VM_CALL_KWARG_bit,          // has kwarg
    VM_CALL_KW_SPLAT_bit,       // m(**opts)
    VM_CALL_TAILCALL_bit,       // located at tail position
    VM_CALL_SUPER_bit,          // super
    VM_CALL_ZSUPER_bit,         // zsuper
    VM_CALL_OPT_SEND_bit,       // internal flag
    VM_CALL_KW_SPLAT_MUT_bit,   // kw splat hash can be modified (to avoid allocating a new one)
    VM_CALL__END
};

#define VM_CALL_ARGS_SPLAT      (0x01 << VM_CALL_ARGS_SPLAT_bit)
#define VM_CALL_ARGS_BLOCKARG   (0x01 << VM_CALL_ARGS_BLOCKARG_bit)
#define VM_CALL_FCALL           (0x01 << VM_CALL_FCALL_bit)
#define VM_CALL_VCALL           (0x01 << VM_CALL_VCALL_bit)
#define VM_CALL_ARGS_SIMPLE     (0x01 << VM_CALL_ARGS_SIMPLE_bit)
#define VM_CALL_KWARG           (0x01 << VM_CALL_KWARG_bit)
#define VM_CALL_KW_SPLAT        (0x01 << VM_CALL_KW_SPLAT_bit)
#define VM_CALL_TAILCALL        (0x01 << VM_CALL_TAILCALL_bit)
#define VM_CALL_SUPER           (0x01 << VM_CALL_SUPER_bit)
#define VM_CALL_ZSUPER          (0x01 << VM_CALL_ZSUPER_bit)
#define VM_CALL_OPT_SEND        (0x01 << VM_CALL_OPT_SEND_bit)
#define VM_CALL_KW_SPLAT_MUT    (0x01 << VM_CALL_KW_SPLAT_MUT_bit)

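/* Illustrative mapping (our annotation, not part of the upstream header):
 * typical Ruby call sites compile to flag combinations such as
 *
 *   foo(1, 2)      // VM_CALL_FCALL | VM_CALL_ARGS_SIMPLE
 *   recv.foo(*a)   // VM_CALL_ARGS_SPLAT
 *   foo(**opts)    // VM_CALL_FCALL | VM_CALL_KW_SPLAT
 *   foo            // VM_CALL_FCALL | VM_CALL_VCALL | VM_CALL_ARGS_SIMPLE
 *
 * Exact flag sets for a given snippet can be inspected with `ruby --dump=insns`.
 */
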
struct rb_callinfo_kwarg {
    int keyword_len;
    int references;
    VALUE keywords[];
};

static inline size_t
rb_callinfo_kwarg_bytes(int keyword_len)
{
    return rb_size_mul_add_or_raise(
        keyword_len,
        sizeof(VALUE),
        sizeof(struct rb_callinfo_kwarg),
        rb_eRuntimeError);
}

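/* Worked example (our annotation): on a typical 64-bit build the flexible
 * array contributes keyword_len * sizeof(VALUE) on top of the 8-byte header,
 * so rb_callinfo_kwarg_bytes(3) == 8 + 3*8 == 32 bytes. The *_or_raise
 * variant raises RuntimeError instead of silently overflowing size_t.
 */
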
// imemo_callinfo
struct rb_callinfo {
    VALUE flags;
    const struct rb_callinfo_kwarg *kwarg;
    VALUE mid;
    VALUE flag;
    VALUE argc;
};

#if !defined(USE_EMBED_CI) || (USE_EMBED_CI+0)
#undef USE_EMBED_CI
#define USE_EMBED_CI 1
#else
#undef USE_EMBED_CI
#define USE_EMBED_CI 0
#endif

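/* Build-knob note (our annotation): the block above normalizes USE_EMBED_CI
 * to exactly 0 or 1 so later code can test it as a plain C expression, e.g.
 *
 *   cppflags=-DUSE_EMBED_CI=0   # force interned imemo callinfos
 *   (leaving it undefined keeps packed callinfos enabled, the default)
 */
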
#if SIZEOF_VALUE == 8
#define CI_EMBED_TAG_bits   1
#define CI_EMBED_ARGC_bits 15
#define CI_EMBED_FLAG_bits 16
#define CI_EMBED_ID_bits   32
#elif SIZEOF_VALUE == 4
#define CI_EMBED_TAG_bits   1
#define CI_EMBED_ARGC_bits  3
#define CI_EMBED_FLAG_bits 13
#define CI_EMBED_ID_bits   15
#endif

#if (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits + CI_EMBED_ID_bits) != (SIZEOF_VALUE * 8)
#error CI_EMBED_*_bits must sum to the bit width of VALUE
#endif

#define CI_EMBED_FLAG 0x01
#define CI_EMBED_ARGC_SHFT (CI_EMBED_TAG_bits)
#define CI_EMBED_ARGC_MASK ((((VALUE)1)<<CI_EMBED_ARGC_bits) - 1)
#define CI_EMBED_FLAG_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits)
#define CI_EMBED_FLAG_MASK ((((VALUE)1)<<CI_EMBED_FLAG_bits) - 1)
#define CI_EMBED_ID_SHFT   (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits)
#define CI_EMBED_ID_MASK   ((((VALUE)1)<<CI_EMBED_ID_bits) - 1)

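/* Layout sketch (our annotation): with the 64-bit widths above, a packed
 * callinfo VALUE is laid out as
 *
 *   bit: 63 ............ 32 31 ........ 16 15 ......... 1   0
 *        [     mid (32)    ][  flag (16)  ][  argc (15)  ][tag]
 *
 * The tag bit coincides with RUBY_FIXNUM_FLAG, so the GC treats the word as
 * an immediate and never dereferences or marks it.
 */
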
static inline bool
vm_ci_packed_p(const struct rb_callinfo *ci)
{
    if (!USE_EMBED_CI) {
        return 0;
    }
    if (LIKELY(((VALUE)ci) & 0x01)) {
        return 1;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
        return 0;
    }
}

static inline bool
vm_ci_p(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci) || IMEMO_TYPE_P(ci, imemo_callinfo)) {
        return 1;
    }
    else {
        return 0;
    }
}

static inline ID
vm_ci_mid(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (((VALUE)ci) >> CI_EMBED_ID_SHFT) & CI_EMBED_ID_MASK;
    }
    else {
        return (ID)ci->mid;
    }
}

static inline unsigned int
vm_ci_flag(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (unsigned int)((((VALUE)ci) >> CI_EMBED_FLAG_SHFT) & CI_EMBED_FLAG_MASK);
    }
    else {
        return (unsigned int)ci->flag;
    }
}

static inline unsigned int
vm_ci_argc(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (unsigned int)((((VALUE)ci) >> CI_EMBED_ARGC_SHFT) & CI_EMBED_ARGC_MASK);
    }
    else {
        return (unsigned int)ci->argc;
    }
}

static inline const struct rb_callinfo_kwarg *
vm_ci_kwarg(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return NULL;
    }
    else {
        return ci->kwarg;
    }
}

static inline void
vm_ci_dump(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        ruby_debug_printf("packed_ci ID:%s flag:%x argc:%u\n",
                          rb_id2name(vm_ci_mid(ci)), vm_ci_flag(ci), vm_ci_argc(ci));
    }
    else {
        rp(ci);
    }
}

#define vm_ci_new(mid, flag, argc, kwarg) vm_ci_new_(mid, flag, argc, kwarg, __FILE__, __LINE__)
#define vm_ci_new_runtime(mid, flag, argc, kwarg) vm_ci_new_runtime_(mid, flag, argc, kwarg, __FILE__, __LINE__)

/* This is passed to STATIC_ASSERT. Cannot be an inline function. */
#define VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg) \
    (((mid ) & ~CI_EMBED_ID_MASK)   ? false :      \
     ((flag) & ~CI_EMBED_FLAG_MASK) ? false :      \
     ((argc) & ~CI_EMBED_ARGC_MASK) ? false :      \
     (kwarg) ? false : true)

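/* Compile-time use sketch (our annotation, hypothetical operands): with
 * constant arguments the predicate folds to a constant expression, so it can
 * feed STATIC_ASSERT as the comment above says:
 *
 *   STATIC_ASSERT(ci_embeddable, VM_CI_EMBEDDABLE_P(0x1234, VM_CALL_FCALL, 2, 0));
 */
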
#define vm_ci_new_id(mid, flag, argc, must_zero) \
    ((const struct rb_callinfo *)                \
     ((((VALUE)(mid )) << CI_EMBED_ID_SHFT)   |  \
      (((VALUE)(flag)) << CI_EMBED_FLAG_SHFT) |  \
      (((VALUE)(argc)) << CI_EMBED_ARGC_SHFT) |  \
      RUBY_FIXNUM_FLAG))

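/* Round-trip sketch (our annotation): provided VM_CI_EMBEDDABLE_P(mid, flag,
 * argc, NULL) holds, packing here and the accessors above agree:
 *
 *   const struct rb_callinfo *ci = vm_ci_new_id(mid, VM_CALL_FCALL, 2, 0);
 *   // vm_ci_packed_p(ci) -> true (low bit is RUBY_FIXNUM_FLAG)
 *   // vm_ci_mid(ci) -> mid, vm_ci_flag(ci) -> VM_CALL_FCALL, vm_ci_argc(ci) -> 2
 */
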
// vm_method.c
const struct rb_callinfo *rb_vm_ci_lookup(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg);
void rb_vm_ci_free(const struct rb_callinfo *);

static inline const struct rb_callinfo *
vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
    if (USE_EMBED_CI && VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg)) {
        RB_DEBUG_COUNTER_INC(ci_packed);
        return vm_ci_new_id(mid, flag, argc, kwarg);
    }

    const bool debug = 0;
    if (debug) ruby_debug_printf("%s:%d ", file, line);

    const struct rb_callinfo *ci = rb_vm_ci_lookup(mid, flag, argc, kwarg);
    if (debug) rp(ci);
    if (kwarg) {
        RB_DEBUG_COUNTER_INC(ci_kw);
    }
    else {
        RB_DEBUG_COUNTER_INC(ci_nokw);
    }

    VM_ASSERT(vm_ci_flag(ci) == flag);
    VM_ASSERT(vm_ci_argc(ci) == argc);

    return ci;
}

static inline const struct rb_callinfo *
vm_ci_new_runtime_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
    RB_DEBUG_COUNTER_INC(ci_runtime);
    return vm_ci_new_(mid, flag, argc, kwarg, file, line);
}

#define VM_CALLINFO_NOT_UNDER_GC IMEMO_FL_USER0

static inline bool
vm_ci_markable(const struct rb_callinfo *ci)
{
    if (! ci) {
        return false; /* or true? This is Qfalse... */
    }
    else if (vm_ci_packed_p(ci)) {
        return true;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
        return ! FL_ANY_RAW((VALUE)ci, VM_CALLINFO_NOT_UNDER_GC);
    }
}

#define VM_CI_ON_STACK(mid_, flags_, argc_, kwarg_) \
    (struct rb_callinfo) {                          \
        .flags = T_IMEMO |                          \
            (imemo_callinfo << FL_USHIFT) |         \
            VM_CALLINFO_NOT_UNDER_GC,               \
        .mid   = mid_,                              \
        .flag  = flags_,                            \
        .argc  = argc_,                             \
        .kwarg = kwarg_,                            \
    }

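/* Usage sketch (our annotation, hypothetical call site): the compound
 * literal builds a heap-free callinfo for a one-shot call:
 *
 *   struct rb_callinfo ci = VM_CI_ON_STACK(mid, VM_CALL_FCALL, argc, NULL);
 *
 * VM_CALLINFO_NOT_UNDER_GC keeps vm_ci_markable(&ci) false, so such a ci
 * must never outlive the C frame that created it.
 */
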
typedef VALUE (*vm_call_handler)(
    struct rb_execution_context_struct *ec,
    struct rb_control_frame_struct *cfp,
    struct rb_calling_info *calling);

// imemo_callcache

struct rb_callcache {
    const VALUE flags;

    /* inline cache: key */
    const VALUE klass; // should not mark it because klass cannot be freed
                       // because of this marking. When klass is collected,
                       // cc will be cleared (cc->klass = 0) at vm_ccs_free().

    /* inline cache: values */
    const struct rb_callable_method_entry_struct * const cme_;
    const vm_call_handler call_;

    union {
        struct {
            uintptr_t value; // Shape ID in upper bits, index in lower bits
        } attr;
        const enum method_missing_reason method_missing_reason; /* used by method_missing */
        VALUE v;
        const struct rb_builtin_function *bf;
    } aux_;
};

#define VM_CALLCACHE_UNMARKABLE FL_FREEZE
#define VM_CALLCACHE_ON_STACK   FL_EXIVAR

/* VM_CALLCACHE_IVAR used for IVAR/ATTRSET/STRUCT_AREF/STRUCT_ASET methods */
#define VM_CALLCACHE_IVAR       IMEMO_FL_USER0
#define VM_CALLCACHE_BF         IMEMO_FL_USER1
#define VM_CALLCACHE_SUPER      IMEMO_FL_USER2
#define VM_CALLCACHE_REFINEMENT IMEMO_FL_USER3

enum vm_cc_type {
    cc_type_normal, // chained from ccs
    cc_type_super,
    cc_type_refinement,
};

extern const struct rb_callcache *rb_vm_empty_cc(void);
extern const struct rb_callcache *rb_vm_empty_cc_for_super(void);

#define vm_cc_empty() rb_vm_empty_cc()

static inline void vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id);

static inline void
vm_cc_attr_index_initialize(const struct rb_callcache *cc, shape_id_t shape_id)
{
    vm_cc_attr_index_set(cc, (attr_index_t)-1, shape_id);
}

static inline const struct rb_callcache *
vm_cc_new(VALUE klass,
          const struct rb_callable_method_entry_struct *cme,
          vm_call_handler call,
          enum vm_cc_type type)
{
    const struct rb_callcache *cc = (const struct rb_callcache *)rb_imemo_new(imemo_callcache, (VALUE)cme, (VALUE)call, 0, klass);

    switch (type) {
      case cc_type_normal:
        break;
      case cc_type_super:
        *(VALUE *)&cc->flags |= VM_CALLCACHE_SUPER;
        break;
      case cc_type_refinement:
        *(VALUE *)&cc->flags |= VM_CALLCACHE_REFINEMENT;
        break;
    }

    vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
    RB_DEBUG_COUNTER_INC(cc_new);
    return cc;
}

static inline bool
vm_cc_super_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_SUPER) != 0;
}

static inline bool
vm_cc_refinement_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_REFINEMENT) != 0;
}

#define VM_CC_ON_STACK(clazz, call, aux, cme) \
    (struct rb_callcache) {                   \
        .flags = T_IMEMO |                    \
            (imemo_callcache << FL_USHIFT) |  \
            VM_CALLCACHE_UNMARKABLE |         \
            VM_CALLCACHE_ON_STACK,            \
        .klass = clazz,                       \
        .cme_  = cme,                         \
        .call_ = call,                        \
        .aux_  = aux,                         \
    }

static inline bool
vm_cc_class_check(const struct rb_callcache *cc, VALUE klass)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->klass == 0 ||
              RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
    return cc->klass == klass;
}

static inline int
vm_cc_markable(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return FL_TEST_RAW((VALUE)cc, VM_CALLCACHE_UNMARKABLE) == 0;
}

static inline const struct rb_callable_method_entry_struct *
vm_cc_cme(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->call_ == NULL || // not initialized yet
              !vm_cc_markable(cc) ||
              cc->cme_ != NULL);

    return cc->cme_;
}

static inline vm_call_handler
vm_cc_call(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->call_ != NULL);
    return cc->call_;
}

static inline attr_index_t
vm_cc_attr_index(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return (attr_index_t)((cc->aux_.attr.value & SHAPE_FLAG_MASK) - 1);
}

static inline shape_id_t
vm_cc_attr_index_dest_shape_id(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));

    return cc->aux_.attr.value >> SHAPE_FLAG_SHIFT;
}

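/* Encoding note (our annotation): aux_.attr.value keeps the attribute index
 * biased by one so that a zero low half means "nothing cached yet":
 *
 *   value = ((uintptr_t)dest_shape_id << SHAPE_FLAG_SHIFT) | (index + 1);
 *   index = (attr_index_t)(value & SHAPE_FLAG_MASK) - 1;  // wraps to -1 when unset
 *
 * vm_cc_attr_index_initialize() exploits the same trick by storing
 * (attr_index_t)-1, which biases back to 0.
 */
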
static inline void
vm_cc_atomic_shape_and_index(const struct rb_callcache *cc, shape_id_t *shape_id, attr_index_t *index)
{
    uintptr_t cache_value = cc->aux_.attr.value; // Atomically read 64 bits
    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
    return;
}

static inline void
vm_ic_atomic_shape_and_index(const struct iseq_inline_iv_cache_entry *ic, shape_id_t *shape_id, attr_index_t *index)
{
    uintptr_t cache_value = ic->value; // Atomically read 64 bits
    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
    return;
}

static inline shape_id_t
vm_ic_attr_index_dest_shape_id(const struct iseq_inline_iv_cache_entry *ic)
{
    return (shape_id_t)(ic->value >> SHAPE_FLAG_SHIFT);
}

static inline unsigned int
vm_cc_cmethod_missing_reason(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return cc->aux_.method_missing_reason;
}

static inline bool
vm_cc_invalidated_p(const struct rb_callcache *cc)
{
    if (cc->klass && !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc))) {
        return false;
    }
    else {
        return true;
    }
}

// For RJIT. cc_cme is expected to be the (inlined) result of vm_cc_cme(cc).
static inline bool
vm_cc_valid_p(const struct rb_callcache *cc, const rb_callable_method_entry_t *cc_cme, VALUE klass)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    if (cc->klass == klass && !METHOD_ENTRY_INVALIDATED(cc_cme)) {
        return 1;
    }
    else {
        return 0;
    }
}

/* callcache: mutate */

static inline void
vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(vm_call_handler *)&cc->call_ = call;
}

static inline void
set_vm_cc_ivar(const struct rb_callcache *cc)
{
    *(VALUE *)&cc->flags |= VM_CALLCACHE_IVAR;
}

static inline void
vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id)
{
    uintptr_t *attr_value = (uintptr_t *)&cc->aux_.attr.value;
    if (!vm_cc_markable(cc)) {
        *attr_value = (uintptr_t)INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT;
        return;
    }
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *attr_value = (attr_index_t)(index + 1) | ((uintptr_t)(dest_shape_id) << SHAPE_FLAG_SHIFT);
    set_vm_cc_ivar(cc);
}

static inline bool
vm_cc_ivar_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_IVAR) != 0;
}

static inline void
vm_ic_attr_index_set(const rb_iseq_t *iseq, const struct iseq_inline_iv_cache_entry *ic, attr_index_t index, shape_id_t dest_shape_id)
{
    *(uintptr_t *)&ic->value = ((uintptr_t)dest_shape_id << SHAPE_FLAG_SHIFT) | (attr_index_t)(index + 1);
}

static inline void
vm_ic_attr_index_initialize(const struct iseq_inline_iv_cache_entry *ic, shape_id_t shape_id)
{
    *(uintptr_t *)&ic->value = (uintptr_t)shape_id << SHAPE_FLAG_SHIFT;
}

static inline void
vm_cc_method_missing_reason_set(const struct rb_callcache *cc, enum method_missing_reason reason)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(enum method_missing_reason *)&cc->aux_.method_missing_reason = reason;
}

static inline void
vm_cc_bf_set(const struct rb_callcache *cc, const struct rb_builtin_function *bf)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(const struct rb_builtin_function **)&cc->aux_.bf = bf;
    *(VALUE *)&cc->flags |= VM_CALLCACHE_BF;
}

static inline bool
vm_cc_bf_p(const struct rb_callcache *cc)
{
    return (cc->flags & VM_CALLCACHE_BF) != 0;
}

static inline void
vm_cc_invalidate(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    VM_ASSERT(cc->klass != 0); // should still be valid (not yet invalidated)

    *(VALUE *)&cc->klass = 0;
    RB_DEBUG_COUNTER_INC(cc_ent_invalidate);
}

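/* Lifecycle sketch (our annotation): invalidation is a one-way transition
 * that vm_cc_invalidated_p() observes:
 *
 *   vm_cc_invalidate(cc);               // *(VALUE *)&cc->klass = 0
 *   VM_ASSERT(vm_cc_invalidated_p(cc)); // also true once the cme has been
 *                                       // METHOD_ENTRY_INVALIDATED()
 */
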
/* calldata */

struct rb_call_data {
    const struct rb_callinfo *ci;
    const struct rb_callcache *cc;
};

struct rb_class_cc_entries {
#if VM_CHECK_MODE > 0
    VALUE debug_sig;
#endif
    int capa;
    int len;
    const struct rb_callable_method_entry_struct *cme;
    struct rb_class_cc_entries_entry {
        const struct rb_callinfo *ci;
        const struct rb_callcache *cc;
    } *entries;
};

#if VM_CHECK_MODE > 0

const rb_callable_method_entry_t *rb_vm_lookup_overloaded_cme(const rb_callable_method_entry_t *cme);
void rb_vm_dump_overloaded_cme_table(void);

static inline bool
vm_ccs_p(const struct rb_class_cc_entries *ccs)
{
    return ccs->debug_sig == ~(VALUE)ccs;
}

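/* Integrity-check sketch (our annotation): the code that allocates a ccs is
 * expected to stamp the signature with the complement of its own address,
 *
 *   ccs->debug_sig = ~(VALUE)ccs;
 *
 * so vm_ccs_p() can cheaply reject stale or foreign pointers in debug builds.
 */
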
static inline bool
vm_cc_check_cme(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme)
{
    if (vm_cc_cme(cc) == cme ||
        (cme->def->iseq_overload && vm_cc_cme(cc) == rb_vm_lookup_overloaded_cme(cme))) {
        return true;
    }
    else {
#if 1
        // debug print

        fprintf(stderr, "iseq_overload:%d\n", (int)cme->def->iseq_overload);
        rp(cme);
        rp(vm_cc_cme(cc));
        rb_vm_lookup_overloaded_cme(cme);
#endif
        return false;
    }
}

#endif

// gc.c
void rb_vm_ccs_free(struct rb_class_cc_entries *ccs);

#endif /* RUBY_VM_CALLINFO_H */