/* Ruby 3.3.6p108 (2024-11-05 revision 75015d4c1f6965b5e85e96fb309f1f2129f933c0)
 * thread.c
 */
/**********************************************************************

  thread.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
  YARV Thread Design

  model 1: Userlevel Thread
    Same as traditional ruby thread.

  model 2: Native Thread with Global VM lock
    Using pthread (or Windows threads); Ruby threads run concurrently.

  model 3: Native Thread with fine grain lock
    Using pthread; Ruby threads run concurrently or in parallel.

  model 4: M:N User:Native threads with Global VM lock
    Combination of models 1 and 2.

  model 5: M:N User:Native threads with fine grain lock
    Combination of models 1 and 3.

------------------------------------------------------------------------

  model 2:
    Only the thread that holds the mutex (GVL: Global VM Lock, or Giant
    VM Lock) can run.  At a scheduling point the running thread releases
    the GVL, and a thread that attempts a blocking operation must release
    the GVL so that another thread can continue.  After the blocking
    operation completes, the thread must check for interrupts
    (RUBY_VM_CHECK_INTS).

    Every VM can run in parallel.

    Ruby threads are scheduled by the OS thread scheduler.

------------------------------------------------------------------------

  model 3:
    All threads run concurrently or in parallel, so exclusive access
    control is needed to touch a shared object.  For example, to access a
    String or an Array object, a fine grain lock must be taken every
    time.
 */

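/*
 * Illustrative sketch of model 2 from a C extension's point of view
 * (hypothetical names; rb_thread_call_without_gvl() is defined later in
 * this file): a slow native call releases the GVL so other Ruby threads
 * can run, then re-acquires it and checks interrupts.
 *
 *   #include "ruby/thread.h"
 *
 *   struct hash_job { const char *buf; size_t len; uint64_t digest; };
 *
 *   static void *
 *   hash_without_gvl(void *ptr)
 *   {
 *       struct hash_job *job = ptr;
 *       job->digest = slow_hash(job->buf, job->len); // hypothetical; no Ruby API here
 *       return NULL;
 *   }
 *
 *   // With the GVL held:
 *   //   rb_thread_call_without_gvl(hash_without_gvl, &job, RUBY_UBF_IO, NULL);
 */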
/*
 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
 * 2.15 or later with _FORTIFY_SOURCE > 0.
 * However, the implementation is wrong. Even though Linux's select(2)
 * supports large fd sizes (> FD_SETSIZE), it wrongly assumes fd is always
 * less than FD_SETSIZE (i.e. 1024). And then when enabling HAVE_RB_FD_INIT,
 * it doesn't work correctly and makes the program abort. Therefore we need
 * to disable FORTIFY_SOURCE until glibc fixes it.
 */
#undef _FORTIFY_SOURCE
#undef __USE_FORTIFY_LEVEL
#define __USE_FORTIFY_LEVEL 0

/* for model 2 */

#include "ruby/internal/config.h"

#ifdef __linux__
// Normally, gcc(1) translates calls to alloca() with inlined code. This is
// not done when either the -ansi, -std=c89, -std=c99, or the -std=c11
// option is given and the header <alloca.h> is not included.
# include <alloca.h>
#endif

#define TH_SCHED(th) (&(th)->ractor->threads.sched)

#include "eval_intern.h"
#include "hrtime.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/io.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "ruby/fiber/scheduler.h"
#include "internal/signal.h"
#include "internal/thread.h"
#include "internal/time.h"
#include "internal/warnings.h"
#include "iseq.h"
#include "rjit.h"
#include "ruby/debug.h"
#include "ruby/io.h"
#include "ruby/thread.h"
#include "ruby/thread_native.h"
#include "timev.h"
#include "vm_core.h"
#include "ractor_core.h"
#include "vm_debug.h"
#include "vm_sync.h"

#if USE_RJIT && defined(HAVE_SYS_WAIT_H)
#include <sys/wait.h>
#endif

#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
#endif

static VALUE rb_cThreadShield;

static VALUE sym_immediate;
static VALUE sym_on_blocking;
static VALUE sym_never;

#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)

static inline VALUE
rb_thread_local_storage(VALUE thread)
{
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
        rb_ivar_set(thread, idLocals, rb_hash_new());
        RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
    }
    return rb_ivar_get(thread, idLocals);
}

enum SLEEP_FLAGS {
    SLEEP_DEADLOCKABLE   = 0x01,
    SLEEP_SPURIOUS_CHECK = 0x02,
    SLEEP_ALLOW_SPURIOUS = 0x04,
    SLEEP_NO_CHECKINTS   = 0x08,
};

static void sleep_forever(rb_thread_t *th, unsigned int fl);
static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);

static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
static int rb_threadptr_dead(rb_thread_t *th);
static void rb_check_deadlock(rb_ractor_t *r);
static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
static const char *thread_status_name(rb_thread_t *th, int detail);
static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
MAYBE_UNUSED(static int consume_communication_pipe(int fd));

static volatile int system_working = 1;
static rb_internal_thread_specific_key_t specific_key_count;

struct waiting_fd {
    struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
    rb_thread_t *th;
    int fd;
    struct rb_io_close_wait_list *busy;
};

/********************************************************************************/

#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

struct rb_blocking_region_buffer {
    enum rb_thread_status prev_status;
};

static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
static void unblock_function_clear(rb_thread_t *th);

static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                                        rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);

#define THREAD_BLOCKING_BEGIN(th) do { \
  struct rb_thread_sched * const sched = TH_SCHED(th); \
  RB_VM_SAVE_MACHINE_CONTEXT(th); \
  thread_sched_to_waiting((sched), (th));

#define THREAD_BLOCKING_END(th) \
  thread_sched_to_running((sched), (th)); \
  rb_ractor_thread_switch(th->ractor, th); \
} while(0)

#ifdef __GNUC__
#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
#else
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#endif
#else
#define only_if_constant(expr, notconst) notconst
#endif
#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        /* always return true unless fail_if_interrupted */ \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        /* Important that this is inlined into the macro, and not part of \
         * blocking_region_begin - see bug #20493 */ \
        RB_VM_SAVE_MACHINE_CONTEXT(th); \
        thread_sched_to_waiting(TH_SCHED(th), th); \
        exec; \
        blocking_region_end(th, &__region); \
    }; \
} while(0)
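/*
 * Usage sketch (illustrative; this mirrors the real call site in
 * rb_nogvl() later in this file): `exec' runs with the GVL released,
 * and `ubf' is how the VM can interrupt the blocked call.
 *
 *   BLOCKING_REGION(th, {
 *       val = func(data1);
 *       saved_errno = rb_errno();
 *   }, ubf, data2, FALSE);
 */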

/*
 * returns true if this thread was spuriously interrupted, false otherwise
 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
 */
#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
static inline int
vm_check_ints_blocking(rb_execution_context_t *ec)
{
    rb_thread_t *th = rb_ec_thread_ptr(ec);

    if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
        if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
    }
    else {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(ec);
    }
    return rb_threadptr_execute_interrupts(th, 1);
}

int
rb_vm_check_ints_blocking(rb_execution_context_t *ec)
{
    return vm_check_ints_blocking(ec);
}

/*
 * poll() is supported by many OSes, but so far Linux is the only
 * one we know of that supports using poll() in all places select()
 * would work.
 */
#if defined(HAVE_POLL)
# if defined(__linux__)
#  define USE_POLL
# endif
# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
#  define USE_POLL
   /* FreeBSD does not set POLLOUT when POLLHUP happens */
#  define POLLERR_SET (POLLHUP | POLLERR)
# endif
#endif

static void
timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
                const struct timeval *timeout)
{
    if (timeout) {
        *rel = rb_timeval2hrtime(timeout);
        *end = rb_hrtime_add(rb_hrtime_now(), *rel);
        *to = rel;
    }
    else {
        *to = 0;
    }
}

MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));

#include THREAD_IMPL_SRC

/*
 * TODO: somebody with win32 knowledge should be able to get rid of
 * timer-thread by busy-waiting on signals. And it should be possible
 * to make the GVL in thread_pthread.c be platform-independent.
 */
#ifndef BUSY_WAIT_SIGNALS
# define BUSY_WAIT_SIGNALS (0)
#endif

#ifndef USE_EVENTFD
# define USE_EVENTFD (0)
#endif

#include "thread_sync.c"

void
rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_initialize(lock);
}

void
rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_destroy(lock);
}

void
rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_lock(lock);
}

void
rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_unlock(lock);
}

static int
unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
{
    do {
        if (fail_if_interrupted) {
            if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
                return FALSE;
            }
        }
        else {
            RUBY_VM_CHECK_INTS(th->ec);
        }

        rb_native_mutex_lock(&th->interrupt_lock);
    } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
             (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));

    VM_ASSERT(th->unblock.func == NULL);

    th->unblock.func = func;
    th->unblock.arg = arg;
    rb_native_mutex_unlock(&th->interrupt_lock);

    return TRUE;
}

static void
unblock_function_clear(rb_thread_t *th)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    th->unblock.func = 0;
    rb_native_mutex_unlock(&th->interrupt_lock);
}

static void
rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
{
    RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);

    rb_native_mutex_lock(&th->interrupt_lock);
    {
        if (trap) {
            RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
        }
        else {
            RUBY_VM_SET_INTERRUPT(th->ec);
        }

        if (th->unblock.func != NULL) {
            (th->unblock.func)(th->unblock.arg);
        }
        else {
            /* none */
        }
    }
    rb_native_mutex_unlock(&th->interrupt_lock);
}

void
rb_threadptr_interrupt(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    rb_threadptr_interrupt_common(th, 0);
}

static void
threadptr_trap_interrupt(rb_thread_t *th)
{
    rb_threadptr_interrupt_common(th, 1);
}

static void
terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
{
    rb_thread_t *th = 0;

    ccan_list_for_each(&r->threads.set, th, lt_node) {
        if (th != main_thread) {
            RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));

            rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
            rb_threadptr_interrupt(th);

            RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
        }
        else {
            RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
        }
    }
}

static void
rb_threadptr_join_list_wakeup(rb_thread_t *thread)
{
    while (thread->join_list) {
        struct rb_waiting_list *join_list = thread->join_list;

        // Consume the entry from the join list:
        thread->join_list = join_list->next;

        rb_thread_t *target_thread = join_list->thread;

        if (target_thread->scheduler != Qnil && join_list->fiber) {
            rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
        }
        else {
            rb_threadptr_interrupt(target_thread);

            switch (target_thread->status) {
              case THREAD_STOPPED:
              case THREAD_STOPPED_FOREVER:
                target_thread->status = THREAD_RUNNABLE;
                break;
              default:
                break;
            }
        }
    }
}

void
rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
{
    while (th->keeping_mutexes) {
        rb_mutex_t *mutex = th->keeping_mutexes;
        th->keeping_mutexes = mutex->next_mutex;

        // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);

        const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
        if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
    }
}

void
rb_thread_terminate_all(rb_thread_t *th)
{
    rb_ractor_t *cr = th->ractor;
    rb_execution_context_t * volatile ec = th->ec;
    volatile int sleeping = 0;

    if (cr->threads.main != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)cr->threads.main, (void *)th);
    }

    /* unlock all locking mutexes */
    rb_threadptr_unlock_all_locking_mutexes(th);

    EC_PUSH_TAG(ec);
    if (EC_EXEC_TAG() == TAG_NONE) {
      retry:
        RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

        terminate_all(cr, th);

        while (rb_ractor_living_thread_num(cr) > 1) {
            rb_hrtime_t rel = RB_HRTIME_PER_SEC;
            /*
             * The thread exiting routine in thread_start_func_2 notifies
             * us when the last sub-thread exits.
             */
            sleeping = 1;
            native_sleep(th, &rel);
            RUBY_VM_CHECK_INTS_BLOCKING(ec);
            sleeping = 0;
        }
    }
    else {
        /*
         * When an exception is caught (e.g. Ctrl+C), broadcast the kill
         * request again to ensure all threads are killed even if they
         * are blocked on sleep, mutex, etc.
         */
        if (sleeping) {
            sleeping = 0;
            goto retry;
        }
    }
    EC_POP_TAG();
}

void rb_threadptr_root_fiber_terminate(rb_thread_t *th);

static void
thread_cleanup_func_before_exec(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    th->status = THREAD_KILLED;

    // The thread stack doesn't exist in the forked process:
    th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;

    rb_threadptr_root_fiber_terminate(th);
}

static void
thread_cleanup_func(void *th_ptr, int atfork)
{
    rb_thread_t *th = th_ptr;

    th->locking_mutex = Qfalse;
    thread_cleanup_func_before_exec(th_ptr);

    /*
     * Unfortunately, we can't release native threading resources at fork
     * because libc may be in an unstable locking state, and therefore
     * touching a threading resource may cause a deadlock.
     */
    if (atfork) {
        th->nt = NULL;
        return;
    }

    rb_native_mutex_destroy(&th->interrupt_lock);
}

static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
static VALUE rb_thread_to_s(VALUE thread);

void
ruby_thread_init_stack(rb_thread_t *th)
{
    native_thread_init_stack(th);
}

const VALUE *
rb_vm_proc_local_ep(VALUE proc)
{
    const VALUE *ep = vm_proc_ep(proc);

    if (ep) {
        return rb_vm_ep_local_ep(ep);
    }
    else {
        return NULL;
    }
}

// for ractor, defined in vm.c
VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
                                  int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);

static VALUE
thread_do_start_proc(rb_thread_t *th)
{
    VALUE args = th->invoke_arg.proc.args;
    const VALUE *args_ptr;
    int args_len;
    VALUE procval = th->invoke_arg.proc.proc;
    rb_proc_t *proc;
    GetProcPtr(procval, proc);

    th->ec->errinfo = Qnil;
    th->ec->root_lep = rb_vm_proc_local_ep(procval);
    th->ec->root_svar = Qfalse;

    vm_check_ints_blocking(th->ec);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        VALUE self = rb_ractor_self(th->ractor);
        VM_ASSERT(FIXNUM_P(args));
        args_len = FIX2INT(args);
        args_ptr = ALLOCA_N(VALUE, args_len);
        rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc_with_self(
            th->ec, proc, self,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
    else {
        args_len = RARRAY_LENINT(args);
        if (args_len < 8) {
            /* free proc.args if the length is small enough */
            args_ptr = ALLOCA_N(VALUE, args_len);
            MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
            th->invoke_arg.proc.args = Qnil;
        }
        else {
            args_ptr = RARRAY_CONST_PTR(args);
        }

        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc(
            th->ec, proc,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
}

static VALUE
thread_do_start(rb_thread_t *th)
{
    native_set_thread_name(th);
    VALUE result = Qundef;

    switch (th->invoke_type) {
      case thread_invoke_type_proc:
        result = thread_do_start_proc(th);
        break;

      case thread_invoke_type_ractor_proc:
        result = thread_do_start_proc(th);
        rb_ractor_atexit(th->ec, result);
        break;

      case thread_invoke_type_func:
        result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
        break;

      case thread_invoke_type_none:
        rb_bug("unreachable");
    }

    return result;
}

void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);

static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
{
    STACK_GROW_DIR_DETECTION;

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    VM_ASSERT(th != th->vm->ractor.main_thread);

    enum ruby_tag_type state;
    VALUE errinfo = Qnil;
    rb_thread_t *ractor_main_th = th->ractor->threads.main;

    // setup ractor
    if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
        RB_VM_LOCK();
        {
            rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
            rb_ractor_t *r = th->ractor;
            r->r_stdin = rb_io_prep_stdin();
            r->r_stdout = rb_io_prep_stdout();
            r->r_stderr = rb_io_prep_stderr();
        }
        RB_VM_UNLOCK();
    }

    // Ensure that we are not joinable.
    VM_ASSERT(UNDEF_P(th->value));

    int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
    VALUE result = Qundef;

    EC_PUSH_TAG(th->ec);

    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);

        SAVE_ROOT_JMPBUF(th, result = thread_do_start(th));
    }

    if (!fiber_scheduler_closed) {
        fiber_scheduler_closed = 1;
        rb_fiber_scheduler_set(Qnil);
    }

    if (!event_thread_end_hooked) {
        event_thread_end_hooked = 1;
        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
    }

    if (state == TAG_NONE) {
        // This must be set AFTER doing all user-level code. At this point, the
        // thread is effectively finished and calls to `Thread#join` will succeed.
        th->value = result;
    }
    else {
        errinfo = th->ec->errinfo;

        VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
        if (!NIL_P(exc)) errinfo = exc;

        if (state == TAG_FATAL) {
            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit(th->ec, Qnil);
            }
            /* fatal error within this thread, need to stop whole script */
        }
        else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
            /* exit on main_thread. */
        }
        else {
            if (th->report_on_exception) {
                VALUE mesg = rb_thread_to_s(th->self);
                rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
                rb_write_error_str(mesg);
                rb_ec_error_print(th->ec, errinfo);
            }

            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit_exception(th->ec);
            }

            if (th->vm->thread_abort_on_exception ||
                th->abort_on_exception || RTEST(ruby_debug)) {
                /* exit on main_thread */
            }
            else {
                errinfo = Qnil;
            }
        }
        th->value = Qnil;
    }

    // The thread is effectively finished and can be joined.
    VM_ASSERT(!UNDEF_P(th->value));

    rb_threadptr_join_list_wakeup(th);
    rb_threadptr_unlock_all_locking_mutexes(th);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        rb_thread_terminate_all(th);
        rb_ractor_teardown(th->ec);
    }

    th->status = THREAD_KILLED;
    RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));

    if (th->vm->ractor.main_thread == th) {
        ruby_stop(0);
    }

    if (RB_TYPE_P(errinfo, T_OBJECT)) {
        /* treat with normal error object */
        rb_threadptr_raise(ractor_main_th, 1, &errinfo);
    }

    EC_POP_TAG();

    rb_ec_clear_current_thread_trace_func(th->ec);

    /* locking_mutex must be Qfalse */
    if (th->locking_mutex != Qfalse) {
        rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
               (void *)th, th->locking_mutex);
    }

    if (ractor_main_th->status == THREAD_KILLED &&
        th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
        /* I'm the last thread. wake up main thread from rb_thread_terminate_all */
        rb_threadptr_interrupt(ractor_main_th);
    }

    rb_check_deadlock(th->ractor);

    rb_fiber_close(th->ec->fiber_ptr);

    thread_cleanup_func(th, FALSE);
    VM_ASSERT(th->ec->vm_stack == NULL);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        // after rb_ractor_living_threads_remove()
        // GC will happen anytime and this ractor can be collected (and destroy GVL).
        // So gvl_release() should be before it.
        thread_sched_to_dead(TH_SCHED(th), th);
        rb_ractor_living_threads_remove(th->ractor, th);
    }
    else {
        rb_ractor_living_threads_remove(th->ractor, th);
        thread_sched_to_dead(TH_SCHED(th), th);
    }

    return 0;
}

struct thread_create_params {
    enum thread_invoke_type type;

    // for normal proc thread
    VALUE args;
    VALUE proc;

    // for ractor
    rb_ractor_t *g;

    // for func
    VALUE (*fn)(void *);
};

static void thread_specific_storage_alloc(rb_thread_t *th);

static VALUE
thread_create_core(VALUE thval, struct thread_create_params *params)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
    int err;

    thread_specific_storage_alloc(th);

    if (OBJ_FROZEN(current_th->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }

    rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);

    switch (params->type) {
      case thread_invoke_type_proc:
        th->invoke_type = thread_invoke_type_proc;
        th->invoke_arg.proc.args = params->args;
        th->invoke_arg.proc.proc = params->proc;
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        break;

      case thread_invoke_type_ractor_proc:
#if RACTOR_CHECK_MODE > 0
        rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
#endif
        th->invoke_type = thread_invoke_type_ractor_proc;
        th->ractor = params->g;
        th->ractor->threads.main = th;
        th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
        th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        rb_ractor_send_parameters(ec, params->g, params->args);
        break;

      case thread_invoke_type_func:
        th->invoke_type = thread_invoke_type_func;
        th->invoke_arg.func.func = params->fn;
        th->invoke_arg.func.arg = (void *)params->args;
        break;

      default:
        rb_bug("unreachable");
    }

    th->priority = current_th->priority;
    th->thgroup = current_th->thgroup;

    th->pending_interrupt_queue = rb_ary_hidden_new(0);
    th->pending_interrupt_queue_checked = 0;
    th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
    RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);

    rb_native_mutex_initialize(&th->interrupt_lock);

    RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));

    rb_ractor_living_threads_insert(th->ractor, th);

    /* kick thread */
    err = native_thread_create(th);
    if (err) {
        th->status = THREAD_KILLED;
        rb_ractor_living_threads_remove(th->ractor, th);
        rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
    }
    return thval;
}

#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)

/*
 * call-seq:
 *    Thread.new { ... } -> thread
 *    Thread.new(*args, &proc) -> thread
 *    Thread.new(*args) { |args| ... } -> thread
 *
 *  Creates a new thread executing the given block.
 *
 *  Any +args+ given to ::new will be passed to the block:
 *
 *      arr = []
 *      a, b, c = 1, 2, 3
 *      Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
 *      arr #=> [1, 2, 3]
 *
 *  A ThreadError exception is raised if ::new is called without a block.
 *
 *  If you're going to subclass Thread, be sure to call super in your
 *  +initialize+ method, otherwise a ThreadError will be raised.
 */
static VALUE
thread_s_new(int argc, VALUE *argv, VALUE klass)
{
    rb_thread_t *th;
    VALUE thread = rb_thread_alloc(klass);

    if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
        rb_raise(rb_eThreadError, "can't alloc thread");
    }

    rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
    th = rb_thread_ptr(thread);
    if (!threadptr_initialized(th)) {
        rb_raise(rb_eThreadError, "uninitialized thread - check `%"PRIsVALUE"#initialize'",
                 klass);
    }
    return thread;
}

/*
 * call-seq:
 *    Thread.start([args]*) {|args| block }   -> thread
 *    Thread.fork([args]*) {|args| block }    -> thread
 *
 * Basically the same as ::new. However, if class Thread is subclassed, then
 * calling +start+ in that subclass will not invoke the subclass's
 * +initialize+ method.
 */

static VALUE
thread_start(VALUE klass, VALUE args)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_proc,
        .args = args,
        .proc = rb_block_proc(),
    };
    return thread_create_core(rb_thread_alloc(klass), &params);
}

static VALUE
threadptr_invoke_proc_location(rb_thread_t *th)
{
    if (th->invoke_type == thread_invoke_type_proc) {
        return rb_proc_location(th->invoke_arg.proc.proc);
    }
    else {
        return Qnil;
    }
}

/* :nodoc: */
static VALUE
thread_initialize(VALUE thread, VALUE args)
{
    rb_thread_t *th = rb_thread_ptr(thread);

    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    else if (th->invoke_type != thread_invoke_type_none) {
        VALUE loc = threadptr_invoke_proc_location(th);
        if (!NIL_P(loc)) {
            rb_raise(rb_eThreadError,
                     "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
                     RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
        }
        else {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
    }
    else {
        struct thread_create_params params = {
            .type = thread_invoke_type_proc,
            .args = args,
            .proc = rb_block_proc(),
        };
        return thread_create_core(thread, &params);
    }
}

VALUE
rb_thread_create(VALUE (*fn)(void *), void *arg)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_func,
        .fn = fn,
        .args = (VALUE)arg,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}
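/*
 * Usage sketch (illustrative; `double_it' and `n' are hypothetical
 * names): spawning a Ruby thread from C that runs a C callback.
 *
 *   static VALUE
 *   double_it(void *arg)
 *   {
 *       int n = *(int *)arg;
 *       return INT2FIX(n * 2);
 *   }
 *
 *   // With the GVL held:
 *   //   static int n = 21;
 *   //   VALUE th = rb_thread_create(double_it, &n);
 *   //   VALUE v  = rb_funcall(th, rb_intern("value"), 0);  // => 42
 */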

VALUE
rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_ractor_proc,
        .g = r,
        .args = args,
        .proc = proc,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}

struct join_arg {
    struct rb_waiting_list *waiter;
    rb_thread_t *target;
    VALUE timeout;
    rb_hrtime_t *limit;
};

static VALUE
remove_from_join_list(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_thread = p->target;

    if (target_thread->status != THREAD_KILLED) {
        struct rb_waiting_list **join_list = &target_thread->join_list;

        while (*join_list) {
            if (*join_list == p->waiter) {
                *join_list = (*join_list)->next;
                break;
            }

            join_list = &(*join_list)->next;
        }
    }

    return Qnil;
}

static int
thread_finished(rb_thread_t *th)
{
    return th->status == THREAD_KILLED || !UNDEF_P(th->value);
}

static VALUE
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiter->thread;
    rb_hrtime_t end = 0, *limit = p->limit;

    if (limit) {
        end = rb_hrtime_add(*limit, rb_hrtime_now());
    }

    while (!thread_finished(target_th)) {
        VALUE scheduler = rb_fiber_scheduler_current();

        if (scheduler != Qnil) {
            rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
            // Check if the target thread is finished after blocking:
            if (thread_finished(target_th)) break;
            // Otherwise, a timeout occurred:
            else return Qfalse;
        }
        else if (!limit) {
            sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
        }
        else {
            if (hrtime_update_expire(limit, end)) {
                RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
                return Qfalse;
            }
            th->status = THREAD_STOPPED;
            native_sleep(th, limit);
        }
        RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
        th->status = THREAD_RUNNABLE;

        RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
    }

    return Qtrue;
}

static VALUE
thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = ec->thread_ptr;
    rb_fiber_t *fiber = ec->fiber_ptr;

    if (th == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be current thread");
    }

    if (th->ractor->threads.main == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be main thread");
    }

    RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->status != THREAD_KILLED) {
        struct rb_waiting_list waiter;
        waiter.next = target_th->join_list;
        waiter.thread = th;
        waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
        target_th->join_list = &waiter;

        struct join_arg arg;
        arg.waiter = &waiter;
        arg.target = target_th;
        arg.timeout = timeout;
        arg.limit = limit;

        if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
            return Qnil;
        }
    }

    RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->ec->errinfo != Qnil) {
        VALUE err = target_th->ec->errinfo;

        if (FIXNUM_P(err)) {
            switch (err) {
              case INT2FIX(TAG_FATAL):
                RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

                /* OK. killed. */
                break;
              default:
                rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
            }
        }
        else if (THROW_DATA_P(target_th->ec->errinfo)) {
            rb_bug("thread_join: THROW_DATA should not reach here.");
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}

/*
 *  call-seq:
 *     thr.join          -> thr
 *     thr.join(limit)   -> thr
 *
 *  The calling thread will suspend execution and run this +thr+.
 *
 *  Does not return until +thr+ exits or until the given +limit+ seconds have
 *  passed.
 *
 *  If the time limit expires, +nil+ will be returned, otherwise +thr+ is
 *  returned.
 *
 *  Any threads not joined will be killed when the main program exits.
 *
 *  If +thr+ had previously raised an exception and the ::abort_on_exception or
 *  $DEBUG flags are not set, (so the exception has not yet been processed), it
 *  will be processed at this time.
 *
 *     a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
 *     x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
 *     x.join # Let thread x finish, thread a will be killed on exit.
 *     #=> "axyz"
 *
 *  The following example illustrates the +limit+ parameter.
 *
 *     y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
 *     puts "Waiting" until y.join(0.15)
 *
 *  This will produce:
 *
 *     tick...
 *     Waiting
 *     tick...
 *     Waiting
 *     tick...
 *     tick...
 */

static VALUE
thread_join_m(int argc, VALUE *argv, VALUE self)
{
    VALUE timeout = Qnil;
    rb_hrtime_t rel = 0, *limit = 0;

    if (rb_check_arity(argc, 0, 1)) {
        timeout = argv[0];
    }

    // Convert the timeout eagerly, so it's always converted and deterministic
    /*
     * This supports INFINITY and negative values, so we can't use
     * rb_time_interval right now...
     */
    if (NIL_P(timeout)) {
        /* unlimited */
    }
    else if (FIXNUM_P(timeout)) {
        rel = rb_sec2hrtime(NUM2TIMET(timeout));
        limit = &rel;
    }
    else {
        limit = double2hrtime(&rel, rb_num2dbl(timeout));
    }

    return thread_join(rb_thread_ptr(self), timeout, limit);
}

/*
 *  call-seq:
 *     thr.value   -> obj
 *
 *  Waits for +thr+ to complete, using #join, and returns its value or raises
 *  the exception which terminated the thread.
 *
 *     a = Thread.new { 2 + 2 }
 *     a.value   #=> 4
 *
 *     b = Thread.new { raise 'something went wrong' }
 *     b.value   #=> RuntimeError: something went wrong
 */

static VALUE
thread_value(VALUE self)
{
    rb_thread_t *th = rb_thread_ptr(self);
    thread_join(th, Qnil, 0);
    if (UNDEF_P(th->value)) {
        // If the thread is dead because we forked, th->value is still Qundef.
        return Qnil;
    }
    return th->value;
}

/*
 * Thread Scheduling
 */

static void
getclockofday(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
        return;
#endif
    rb_timespec_now(ts);
}

/*
 * Don't inline this, since the library call is already time consuming
 * and we don't want "struct timespec" on the stack too long for GC
 */
NOINLINE(rb_hrtime_t rb_hrtime_now(void));
rb_hrtime_t
rb_hrtime_now(void)
{
    struct timespec ts;

    getclockofday(&ts);
    return rb_timespec2hrtime(&ts);
}

/*
 * at least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
 * being uninitialized, maybe other versions, too.
 */
COMPILER_WARNING_PUSH
#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#endif
#ifndef PRIu64
#define PRIu64 PRI_64_PREFIX "u"
#endif
/*
 * @end is the absolute time when @timeout is set to expire.
 * Returns true if @end has passed;
 * updates @timeout and returns false otherwise.
 */
static int
hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
{
    rb_hrtime_t now = rb_hrtime_now();

    if (now > end) return 1;

    RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);

    *timeout = end - now;
    return 0;
}
COMPILER_WARNING_POP
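/*
 * Usage sketch (illustrative): hrtime deadlines in this file follow a
 * rel/end pattern -- compute an absolute deadline once, then shrink the
 * relative timeout after every (possibly spurious) wakeup, as
 * sleep_hrtime() below does:
 *
 *   rb_hrtime_t rel = RB_HRTIME_PER_SEC;                 // 1 second
 *   rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
 *
 *   while (... still waiting ...) {
 *       native_sleep(th, &rel);                          // may wake early
 *       if (hrtime_update_expire(&rel, end)) break;      // deadline passed
 *   }
 */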

static int
sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    int woke;
    rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
        woke = 1;
    }
    th->status = prev_status;
    return woke;
}

static int
sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    int woke;
    rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
        woke = 1;
    }
    th->status = prev_status;
    return woke;
}

static void
sleep_forever(rb_thread_t *th, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    enum rb_thread_status status;
    int woke;

    status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    th->status = status;

    if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);

    while (th->status == status) {
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_inc(th->ractor);
            rb_check_deadlock(th->ractor);
        }
        {
            native_sleep(th, 0);
        }
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_dec(th->ractor);
        }
        if (fl & SLEEP_ALLOW_SPURIOUS) {
            break;
        }

        woke = vm_check_ints_blocking(th->ec);

        if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
            break;
        }
    }
    th->status = prev_status;
}
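/*
 * Flag combinations used by callers in this file (for reference):
 *
 *   rb_thread_sleep_forever():  SLEEP_SPURIOUS_CHECK
 *   rb_thread_sleep_deadly():   SLEEP_DEADLOCKABLE | SLEEP_SPURIOUS_CHECK
 *   thread_join_sleep():        SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS |
 *                               SLEEP_NO_CHECKINTS
 *
 * SLEEP_DEADLOCKABLE makes the sleeper visible to rb_check_deadlock();
 * SLEEP_SPURIOUS_CHECK keeps sleeping through spurious wakeups;
 * SLEEP_ALLOW_SPURIOUS returns after a single native_sleep();
 * SLEEP_NO_CHECKINTS skips the initial interrupt check.
 */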

void
rb_thread_sleep_forever(void)
{
    RUBY_DEBUG_LOG("forever");
    sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
}

void
rb_thread_sleep_deadly(void)
{
    RUBY_DEBUG_LOG("deadly");
    sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
}

static void
rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
{
    VALUE scheduler = rb_fiber_scheduler_current();
    if (scheduler != Qnil) {
        rb_fiber_scheduler_block(scheduler, blocker, timeout);
    }
    else {
        RUBY_DEBUG_LOG("...");
        if (end) {
            sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
        }
        else {
            sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
        }
    }
}

void
rb_thread_wait_for(struct timeval time)
{
    rb_thread_t *th = GET_THREAD();

    sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
}

/*
 * CAUTION: This function causes thread switching.
 *          rb_thread_check_ints() checks ruby's interrupts.
 *          Some interrupts need thread switching, invoking handlers,
 *          and so on.
 */

void
rb_thread_check_ints(void)
{
    RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
}

/*
 * Hidden API for tcl/tk wrapper.
 * There is no guarantee to perpetuate it.
 */
int
rb_thread_check_trap_pending(void)
{
    return rb_signal_buff_size() != 0;
}

/* This function can be called in blocking region. */
int
rb_thread_interrupted(VALUE thval)
{
    return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
}

void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}

static void
rb_thread_schedule_limits(uint32_t limits_us)
{
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();
        RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);

        if (th->running_time_us >= limits_us) {
            RUBY_DEBUG_LOG("switch %s", "start");

            RB_VM_SAVE_MACHINE_CONTEXT(th);
            thread_sched_yield(TH_SCHED(th), th);
            rb_ractor_thread_switch(th->ractor, th);

            RUBY_DEBUG_LOG("switch %s", "done");
        }
    }
}

void
rb_thread_schedule(void)
{
    rb_thread_schedule_limits(0);
    RUBY_VM_CHECK_INTS(GET_EC());
}

/* blocking region */

static inline int
blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                      rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
{
#ifdef RUBY_VM_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif
    VM_ASSERT(th == GET_THREAD());

    region->prev_status = th->status;
    if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
        th->blocking_region_buffer = region;
        th->status = THREAD_STOPPED;
        rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);

        RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    /* entry to ubf_list still permitted at this point, make it impossible: */
    unblock_function_clear(th);
    /* entry to ubf_list impossible at this point, so unregister is safe: */
    unregister_ubf_list(th);

    thread_sched_to_running(TH_SCHED(th), th);
    rb_ractor_thread_switch(th->ractor, th);

    th->blocking_region_buffer = 0;
    rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }

    RUBY_DEBUG_LOG("end");

#ifndef _WIN32
    // GET_THREAD() clears WSAGetLastError()
    VM_ASSERT(th == GET_THREAD());
#endif
}

void *
rb_nogvl(void *(*func)(void *), void *data1,
         rb_unblock_function_t *ubf, void *data2,
         int flags)
{
    void *val = 0;
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_ec_thread_ptr(ec);
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    bool is_main_thread = vm->ractor.main_thread == th;
    int saved_errno = 0;
    VALUE ubf_th = Qfalse;

    if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
        ubf = ubf_select;
        data2 = th;
    }
    else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
        if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
            vm->ubf_async_safe = 1;
        }
    }

    BLOCKING_REGION(th, {
        val = func(data1);
        saved_errno = rb_errno();
    }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);

    if (is_main_thread) vm->ubf_async_safe = 0;

    if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
        RUBY_VM_CHECK_INTS_BLOCKING(ec);
    }

    if (ubf_th != Qfalse) {
        thread_value(rb_thread_kill(ubf_th));
    }

    rb_errno_set(saved_errno);

    return val;
}
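/*
 * Usage sketch (illustrative): rb_nogvl() is the flag-taking core of the
 * rb_thread_call_without_gvl*() family documented below.  For example,
 * the "fail if interrupted" variant is just:
 *
 *   void *ret = rb_nogvl(blocking_func, arg,    // hypothetical func/arg
 *                        cancel_func, arg,      // unblocking function
 *                        RB_NOGVL_INTR_FAIL);   // return early on interrupt
 *
 * RB_NOGVL_UBF_ASYNC_SAFE additionally marks `ubf' as async-signal-safe,
 * which (per the code above) only takes effect when the main thread is
 * the sole running thread.
 */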

/*
 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
 *                               without interrupt processing.
 *
 * rb_thread_call_without_gvl() does:
 *   (1) Check interrupts.
 *   (2) release GVL.
 *       Other Ruby threads may run in parallel.
 *   (3) call func with data1
 *   (4) acquire GVL.
 *       Other Ruby threads can not run in parallel any more.
 *   (5) Check interrupts.
 *
 * rb_thread_call_without_gvl2() does:
 *   (1) Check interrupt and return if interrupted.
 *   (2) release GVL.
 *   (3) call func with data1 and a pointer to the flags.
 *   (4) acquire GVL.
 *
 * If another thread interrupts this thread (Thread#kill, signal delivery,
 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
 * toggling a cancellation flag, canceling the invocation of a call inside
 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
 *
 * There are built-in ubfs and you can specify these ubfs:
 *
 * * RUBY_UBF_IO: ubf for IO operation
 * * RUBY_UBF_PROCESS: ubf for process operation
 *
 * However, we can not guarantee our built-in ubfs interrupt your `func()'
 * correctly. Be careful to use rb_thread_call_without_gvl(). If you don't
 * provide a proper ubf(), your program will not stop for Control+C or
 * other shutdown events.
 *
 * "Check interrupts" on the above list means checking asynchronous
 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
 * request, and so on) and calling corresponding procedures
 * (such as `trap' for signals, raise an exception for Thread#raise).
 * If `func()' finished and received interrupts, you may skip interrupt
 * checking. For example, assume the following func() that reads data from
 * a file.
 *
 *   read_func(...) {
 *                   // (a) before read
 *     read(buffer); // (b) reading
 *                   // (c) after read
 *   }
 *
 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
 * `read_func()' and interrupts are checked. However, if an interrupt
 * occurs at (c), after the *read* operation is completed, checking
 * interrupts is harmful because it causes an irrevocable side-effect:
 * the read data will vanish. To avoid such a problem, the `read_func()'
 * should be used with `rb_thread_call_without_gvl2()'.
 *
 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
 * immediately. This function does not show when the execution was
 * interrupted. For example, there are 4 possible timings: (a), (b), (c)
 * and before calling read_func(). You need to record the progress of
 * read_func() and check it after `rb_thread_call_without_gvl2()'. You may
 * need to call `rb_thread_check_ints()' correctly, or your program can
 * not run proper procedures such as `trap' and so on.
 *
 * NOTE: You can not execute most of the Ruby C API and touch Ruby
 *       objects in `func()' and `ubf()', including raising an
 *       exception, because the current thread doesn't acquire the GVL
 *       (it causes synchronization problems). If you need to
 *       call ruby functions, either use rb_thread_call_with_gvl()
 *       or read the source code of C APIs and confirm safety by
 *       yourself.
 *
 * NOTE: In short, this API is difficult to use safely. I recommend you
 *       use other ways if you can. We lack experience using this API.
 *       Please report your problems related to it.
 *
 * NOTE: Releasing the GVL and re-acquiring the GVL may be expensive
 *       operations for a short running `func()'. Be sure to benchmark,
 *       and use this mechanism only when `func()' consumes enough time.
 *
 * Safe C API:
 * * rb_thread_interrupted() - check interrupt flag
 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
 *   they will work without GVL, and may acquire GVL when GC is needed.
 */
void *
rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
                            rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
}

void *
rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
                           rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, 0);
}
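/*
 * Usage sketch (illustrative; hypothetical names): a cancellable blocking
 * call.  `ubf_cancel' flips a flag that the blocking loop polls, which is
 * the pattern the comment above describes.
 *
 *   struct blocking_op { volatile int cancelled; int result; };
 *
 *   static void *
 *   do_blocking(void *ptr)
 *   {
 *       struct blocking_op *op = ptr;
 *       while (!op->cancelled && !work_is_done())   // hypothetical
 *           op->result = do_a_little_work();        // hypothetical
 *       return NULL;
 *   }
 *
 *   static void
 *   ubf_cancel(void *ptr)
 *   {
 *       ((struct blocking_op *)ptr)->cancelled = 1; // may run without GVL
 *   }
 *
 *   // rb_thread_call_without_gvl(do_blocking, &op, ubf_cancel, &op);
 */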

static int
waitfd_to_waiting_flag(int wfd_event)
{
    return wfd_event << 1;
}

static void
thread_io_setup_wfd(rb_thread_t *th, int fd, struct waiting_fd *wfd)
{
    wfd->fd = fd;
    wfd->th = th;
    wfd->busy = NULL;

    RB_VM_LOCK_ENTER();
    {
        ccan_list_add(&th->vm->waiting_fds, &wfd->wfd_node);
    }
    RB_VM_LOCK_LEAVE();
}

static void
thread_io_wake_pending_closer(struct waiting_fd *wfd)
{
    bool has_waiter = wfd->busy && RB_TEST(wfd->busy->wakeup_mutex);
    if (has_waiter) {
        rb_mutex_lock(wfd->busy->wakeup_mutex);
    }

    /* Needs to be protected with RB_VM_LOCK because we don't know if
       wfd is on the global list of pending FD ops or if it's on a
       struct rb_io_close_wait_list close-waiter. */
    RB_VM_LOCK_ENTER();
    ccan_list_del(&wfd->wfd_node);
    RB_VM_LOCK_LEAVE();

    if (has_waiter) {
        rb_thread_t *th = rb_thread_ptr(wfd->busy->closing_thread);
        if (th->scheduler != Qnil) {
            rb_fiber_scheduler_unblock(th->scheduler, wfd->busy->closing_thread, wfd->busy->closing_fiber);
        }
        else {
            rb_thread_wakeup(wfd->busy->closing_thread);
        }
        rb_mutex_unlock(wfd->busy->wakeup_mutex);
    }
}

static int
thread_io_wait_events(rb_thread_t *th, rb_execution_context_t *ec, int fd, int events, struct timeval *timeout, struct waiting_fd *wfd)
{
#if defined(USE_MN_THREADS) && USE_MN_THREADS
    if (!th_has_dedicated_nt(th) &&
        (events || timeout) &&
        th->blocking // no fiber scheduler
        ) {
        int r;
        rb_hrtime_t rel, *prel;

        if (timeout) {
            rel = rb_timeval2hrtime(timeout);
            prel = &rel;
        }
        else {
            prel = NULL;
        }

        VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));

        thread_io_setup_wfd(th, fd, wfd);
        {
            // wait readable/writable
            r = thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel);
        }
        thread_io_wake_pending_closer(wfd);

        RUBY_VM_CHECK_INTS_BLOCKING(ec);

        return r;
    }
#endif // defined(USE_MN_THREADS) && USE_MN_THREADS

    return 0;
}


VALUE
rb_thread_io_blocking_call(rb_blocking_function_t *func, void *data1, int fd, int events)
{
    rb_execution_context_t * volatile ec = GET_EC();
    rb_thread_t *th = rb_ec_thread_ptr(ec);

    RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), fd, events);

    struct waiting_fd waiting_fd;

    thread_io_wait_events(th, ec, fd, events, NULL, &waiting_fd);

    volatile VALUE val = Qundef; /* shouldn't be used */
    volatile int saved_errno = 0;
    enum ruby_tag_type state;

    // `errno` is only valid when there is an actual error - but we can't
    // extract that from the return value of `func` alone, so we clear any
    // prior `errno` value here so that we can later check if it was set by
    // `func` or not (as opposed to some previously set value).
    errno = 0;

    thread_io_setup_wfd(th, fd, &waiting_fd);

    EC_PUSH_TAG(ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        BLOCKING_REGION(waiting_fd.th, {
            val = func(data1);
            saved_errno = errno;
        }, ubf_select, waiting_fd.th, FALSE);
    }
    EC_POP_TAG();

    /*
     * must be deleted before jump
     * this will delete either from waiting_fds or on-stack struct rb_io_close_wait_list
     */
    thread_io_wake_pending_closer(&waiting_fd);

    if (state) {
        EC_JUMP_TAG(ec, state);
    }
    /* TODO: check func() */
    RUBY_VM_CHECK_INTS_BLOCKING(ec);

    // If the error was a timeout, we raise a specific exception for that:
    if (saved_errno == ETIMEDOUT) {
        rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
    }

    errno = saved_errno;

    return val;
}

VALUE
rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
{
    return rb_thread_io_blocking_call(func, data1, fd, 0);
}
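/*
 * Usage sketch (illustrative; hypothetical names): how an IO primitive
 * might wrap a read(2) so that the fd is registered in waiting_fds and
 * the thread can be woken if another thread closes the IO.
 *
 *   struct read_args { int fd; void *buf; size_t len; ssize_t n; };
 *
 *   static VALUE
 *   read_blocking(void *ptr)
 *   {
 *       struct read_args *a = ptr;
 *       a->n = read(a->fd, a->buf, a->len);
 *       return Qundef;
 *   }
 *
 *   // rb_thread_io_blocking_call(read_blocking, &args, args.fd,
 *   //                            RB_WAITFD_IN);
 */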

/*
 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
 *
 * After releasing the GVL using
 * rb_thread_call_without_gvl() you can not access Ruby values or invoke
 * methods. If you need to access Ruby you must use this function
 * rb_thread_call_with_gvl().
 *
 * This function rb_thread_call_with_gvl() does:
 * (1) acquire GVL.
 * (2) call passed function `func'.
 * (3) release GVL.
 * (4) return a value which is returned at (2).
 *
 * NOTE: You should not return a Ruby object at (2) because such an object
 *       will not be marked.
 *
 * NOTE: If an exception is raised in `func', this function DOES NOT
 *       protect (catch) the exception. If you have any resources
 *       which should be freed before throwing an exception, you need to
 *       use rb_protect() in `func' and return a value which represents
 *       that an exception was raised.
 *
 * NOTE: This function should not be called by a thread which was not
 *       created as a Ruby thread (created by Thread.new or so). In other
 *       words, this function *DOES NOT* associate or convert a NON-Ruby
 *       thread to a Ruby thread.
 */
void *
rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
{
    rb_thread_t *th = ruby_thread_from_native();
    struct rb_blocking_region_buffer *brb;
    struct rb_unblock_callback prev_unblock;
    void *r;

    if (th == 0) {
        /* Error has occurred, but we can't use rb_bug()
         * because this thread is not Ruby's thread.
         * What should we do?
         */
        bp();
        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
        exit(EXIT_FAILURE);
    }

    brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
    prev_unblock = th->unblock;

    if (brb == 0) {
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    }

    blocking_region_end(th, brb);
    /* enter to Ruby world: You can access Ruby values, methods and so on. */
    r = (*func)(data1);
    /* leave from Ruby world: You can not access Ruby values, etc. */
    int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
    RUBY_ASSERT_ALWAYS(released);
    RB_VM_SAVE_MACHINE_CONTEXT(th);
    thread_sched_to_waiting(TH_SCHED(th), th);
    return r;
}
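/*
 * Usage sketch (illustrative; hypothetical names): calling back into Ruby
 * from inside a rb_thread_call_without_gvl() body.
 *
 *   static void *
 *   log_with_gvl(void *msg)
 *   {
 *       rb_funcall(rb_stderr, rb_intern("puts"), 1,
 *                  rb_str_new_cstr((const char *)msg));
 *       return NULL;
 *   }
 *
 *   static void *
 *   long_computation(void *arg)   // runs without the GVL
 *   {
 *       // ... first half of the work ...
 *       rb_thread_call_with_gvl(log_with_gvl, (void *)"half way");
 *       // ... second half of the work ...
 *       return NULL;
 *   }
 */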

/*
 * ruby_thread_has_gvl_p - check if current native thread has GVL.
 *
 ***
 *** This API is EXPERIMENTAL!
 *** We do not guarantee that this API remains in ruby 1.9.2 or later.
 ***
 */

int
ruby_thread_has_gvl_p(void)
{
    rb_thread_t *th = ruby_thread_from_native();

    if (th && th->blocking_region_buffer == 0) {
        return 1;
    }
    else {
        return 0;
    }
}

/*
 * call-seq:
 *    Thread.pass   -> nil
 *
 * Give the thread scheduler a hint to pass execution to another thread.
 * A running thread may or may not switch; it depends on the OS and
 * processor.
 */

static VALUE
thread_s_pass(VALUE klass)
{
    rb_thread_schedule();
    return Qnil;
}

/*****************************************************/

/*
 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
 *
 * Async events such as an exception thrown by Thread#raise,
 * Thread#kill and thread termination (after main thread termination)
 * will be queued to th->pending_interrupt_queue.
 * - clear: clear the queue.
 * - enque: enqueue an err object into the queue.
 * - deque: dequeue an err object from the queue.
 * - active_p: return 1 if the queue should be checked.
 *
 * All rb_threadptr_pending_interrupt_* functions are called by
 * a GVL-acquired thread, of course.
 * Note that all "rb_"-prefixed APIs need the GVL to call.
 */
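/*
 * Ruby-level view (illustrative): these queues back Thread#raise /
 * Thread#kill delivery and Thread.handle_interrupt masking, e.g.
 *
 *   th = Thread.new do
 *     Thread.handle_interrupt(RuntimeError => :never) do
 *       do_critical_section          # hypothetical; interrupts deferred
 *       Thread.handle_interrupt(RuntimeError => :immediate) do
 *         # a deferred RuntimeError from Thread#raise is delivered here
 *       end
 *     end
 *   end
 *   th.raise(RuntimeError, "boom")
 */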

void
rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
{
    rb_ary_clear(th->pending_interrupt_queue);
}

void
rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
{
    rb_ary_push(th->pending_interrupt_queue, v);
    th->pending_interrupt_queue_checked = 0;
}

static void
threadptr_check_pending_interrupt_queue(rb_thread_t *th)
{
    if (!th->pending_interrupt_queue) {
        rb_raise(rb_eThreadError, "uninitialized thread");
    }
}

enum handle_interrupt_timing {
    INTERRUPT_NONE,
    INTERRUPT_IMMEDIATE,
    INTERRUPT_ON_BLOCKING,
    INTERRUPT_NEVER
};

static enum handle_interrupt_timing
rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
{
    if (sym == sym_immediate) {
        return INTERRUPT_IMMEDIATE;
    }
    else if (sym == sym_on_blocking) {
        return INTERRUPT_ON_BLOCKING;
    }
    else if (sym == sym_never) {
        return INTERRUPT_NEVER;
    }
    else {
        rb_raise(rb_eThreadError, "unknown mask signature");
    }
}

1973static enum handle_interrupt_timing
1974rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
1975{
1976 VALUE mask;
1977 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
1978 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
1979 VALUE mod;
1980 long i;
1981
1982 for (i=0; i<mask_stack_len; i++) {
1983 mask = mask_stack[mask_stack_len-(i+1)];
1984
1985 if (SYMBOL_P(mask)) {
1986 /* do not match RUBY_FATAL_THREAD_KILLED etc */
1987 if (err != rb_cInteger) {
1988 return rb_threadptr_pending_interrupt_from_symbol(th, mask);
1989 }
1990 else {
1991 continue;
1992 }
1993 }
1994
1995 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
1996 VALUE klass = mod;
1997 VALUE sym;
1998
1999 if (BUILTIN_TYPE(mod) == T_ICLASS) {
2000 klass = RBASIC(mod)->klass;
2001 }
2002 else if (mod != RCLASS_ORIGIN(mod)) {
2003 continue;
2004 }
2005
2006 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2007 return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2008 }
2009 }
2010        /* try the next mask */
2011 }
2012 return INTERRUPT_NONE;
2013}
2014
2015static int
2016rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2017{
2018 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2019}
2020
2021static int
2022rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2023{
2024 int i;
2025 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2026 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2027 if (rb_obj_is_kind_of(e, err)) {
2028 return TRUE;
2029 }
2030 }
2031 return FALSE;
2032}
2033
2034static VALUE
2035rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2036{
2037#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2038 int i;
2039
2040 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2041 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2042
2043 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2044
2045 switch (mask_timing) {
2046 case INTERRUPT_ON_BLOCKING:
2047 if (timing != INTERRUPT_ON_BLOCKING) {
2048 break;
2049 }
2050 /* fall through */
2051 case INTERRUPT_NONE: /* default: IMMEDIATE */
2052 case INTERRUPT_IMMEDIATE:
2053 rb_ary_delete_at(th->pending_interrupt_queue, i);
2054 return err;
2055 case INTERRUPT_NEVER:
2056 break;
2057 }
2058 }
2059
2060 th->pending_interrupt_queue_checked = 1;
2061 return Qundef;
2062#else
2063 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2064 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2065 th->pending_interrupt_queue_checked = 1;
2066 }
2067 return err;
2068#endif
2069}
2070
2071static int
2072threadptr_pending_interrupt_active_p(rb_thread_t *th)
2073{
2074 /*
2075 * For optimization, we don't check async errinfo queue
2076 * if the queue and the thread interrupt mask were not changed
2077     * since the last check.
2078 */
2079 if (th->pending_interrupt_queue_checked) {
2080 return 0;
2081 }
2082
2083 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2084 return 0;
2085 }
2086
2087 return 1;
2088}
2089
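/* Hash-foreach callback for Thread.handle_interrupt below: validates
 * each ExceptionClass => TimingSymbol pair of the user-supplied mask
 * and accumulates the internal mask in *maskp, kept as a bare symbol
 * when the only key is Exception, otherwise as an identity hash. */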
2090static int
2091handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2092{
2093 VALUE *maskp = (VALUE *)args;
2094
2095 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2096 rb_raise(rb_eArgError, "unknown mask signature");
2097 }
2098
2099 if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2100 *maskp = val;
2101 return ST_CONTINUE;
2102 }
2103
2104 if (RTEST(*maskp)) {
2105 if (!RB_TYPE_P(*maskp, T_HASH)) {
2106 VALUE prev = *maskp;
2107 *maskp = rb_ident_hash_new();
2108 if (SYMBOL_P(prev)) {
2109 rb_hash_aset(*maskp, rb_eException, prev);
2110 }
2111 }
2112 rb_hash_aset(*maskp, key, val);
2113 }
2114 else {
2115 *maskp = Qfalse;
2116 }
2117
2118 return ST_CONTINUE;
2119}
2120
2121/*
2122 * call-seq:
2123 * Thread.handle_interrupt(hash) { ... } -> result of the block
2124 *
2125 * Changes asynchronous interrupt timing.
2126 *
2127 * _interrupt_ means an asynchronous event and its corresponding procedure,
2128 * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2129 * and main thread termination (if the main thread terminates, then all
2130 * other threads will be killed).
2131 *
2132 * The given +hash+ has pairs like <code>ExceptionClass =>
2133 * :TimingSymbol</code>. Where the ExceptionClass is the interrupt handled by
2134 * the given block. The TimingSymbol can be one of the following symbols:
2135 *
2136 * [+:immediate+] Invoke interrupts immediately.
2137 * [+:on_blocking+] Invoke interrupts during a _BlockingOperation_.
2138 * [+:never+] Never invoke interrupts.
2139 *
2140 * _BlockingOperation_ means that the operation will block the calling thread,
2141 * such as read and write. In the CRuby implementation, _BlockingOperation_ is any
2142 * operation executed without the GVL.
2143 *
2144 * Masked asynchronous interrupts are delayed until they are enabled.
2145 * This method is similar to sigprocmask(3).
2146 *
2147 * === NOTE
2148 *
2149 * Asynchronous interrupts are difficult to use.
2150 *
2151 * If you need to communicate between threads, please consider using another approach, such as Queue.
2152 *
2153 * Otherwise, use asynchronous interrupts only with a deep understanding of this method.
2154 *
2155 * === Usage
2156 *
2157 * In this example, we can guard from Thread#raise exceptions.
2158 *
2159 * Using the +:never+ TimingSymbol the RuntimeError exception will always be
2160 * ignored in the first block of the main thread. In the second
2161 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2162 *
2163 * th = Thread.new do
2164 * Thread.handle_interrupt(RuntimeError => :never) {
2165 * begin
2166 * # You can write resource allocation code safely.
2167 * Thread.handle_interrupt(RuntimeError => :immediate) {
2168 * # ...
2169 * }
2170 * ensure
2171 * # You can write resource deallocation code safely.
2172 * end
2173 * }
2174 * end
2175 * Thread.pass
2176 * # ...
2177 * th.raise "stop"
2178 *
2179 * While we are ignoring the RuntimeError exception, it's safe to write our
2180 * resource allocation code. Then, the ensure block is where we can safely
2181 * deallocate our resources.
2182 *
2183 * ==== Guarding from Timeout::Error
2184 *
2185 * In the next example, we will guard from the Timeout::Error exception. This
2186 * will help prevent leaking resources when Timeout::Error exceptions occur
2187 * during a normal ensure clause. For this example we use the help of the
2188 * standard library Timeout, from lib/timeout.rb.
2189 *
2190 * require 'timeout'
2191 * Thread.handle_interrupt(Timeout::Error => :never) {
2192 * timeout(10){
2193 * # Timeout::Error doesn't occur here
2194 * Thread.handle_interrupt(Timeout::Error => :on_blocking) {
2195 * # possible to be killed by Timeout::Error
2196 * # while blocking operation
2197 * }
2198 * # Timeout::Error doesn't occur here
2199 * }
2200 * }
2201 *
2202 * In the first part of the +timeout+ block, we can rely on Timeout::Error being
2203 * ignored. Then in the <code>Timeout::Error => :on_blocking</code> block, any
2204 * operation that will block the calling thread is susceptible to a
2205 * Timeout::Error exception being raised.
2206 *
2207 * ==== Stack control settings
2208 *
2209 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2210 * to control more than one ExceptionClass and TimingSymbol at a time.
2211 *
2212 * Thread.handle_interrupt(FooError => :never) {
2213 * Thread.handle_interrupt(BarError => :never) {
2214 * # FooError and BarError are prohibited.
2215 * }
2216 * }
2217 *
2218 * ==== Inheritance with ExceptionClass
2219 *
2220 * All exceptions inherited from the ExceptionClass parameter will be considered.
2221 *
2222 * Thread.handle_interrupt(Exception => :never) {
2223 * # all exceptions inherited from Exception are prohibited.
2224 * }
2225 *
2226 * For handling all interrupts, use +Object+ and not +Exception+
2227 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2228 */
2229static VALUE
2230rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2231{
2232 VALUE mask = Qundef;
2233 rb_execution_context_t * volatile ec = GET_EC();
2234 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2235 volatile VALUE r = Qnil;
2236 enum ruby_tag_type state;
2237
2238 if (!rb_block_given_p()) {
2239 rb_raise(rb_eArgError, "block is needed.");
2240 }
2241
2242 mask_arg = rb_to_hash_type(mask_arg);
2243
2244 if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2245 mask = Qnil;
2246 }
2247
2248 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2249
2250 if (UNDEF_P(mask)) {
2251 return rb_yield(Qnil);
2252 }
2253
2254 if (!RTEST(mask)) {
2255 mask = mask_arg;
2256 }
2257 else if (RB_TYPE_P(mask, T_HASH)) {
2258 OBJ_FREEZE_RAW(mask);
2259 }
2260
2261 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2262 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2263 th->pending_interrupt_queue_checked = 0;
2264 RUBY_VM_SET_INTERRUPT(th->ec);
2265 }
2266
2267 EC_PUSH_TAG(th->ec);
2268 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2269 r = rb_yield(Qnil);
2270 }
2271 EC_POP_TAG();
2272
2273 rb_ary_pop(th->pending_interrupt_mask_stack);
2274 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2275 th->pending_interrupt_queue_checked = 0;
2276 RUBY_VM_SET_INTERRUPT(th->ec);
2277 }
2278
2279 RUBY_VM_CHECK_INTS(th->ec);
2280
2281 if (state) {
2282 EC_JUMP_TAG(th->ec, state);
2283 }
2284
2285 return r;
2286}
2287
2288/*
2289 * call-seq:
2290 * target_thread.pending_interrupt?(error = nil) -> true/false
2291 *
2292 * Returns whether the target thread has any deferred asynchronous events queued.
2293 *
2294 * If +error+ is given, then check only for +error+ type deferred events.
2295 *
2296 * See ::pending_interrupt? for more information.
2297 */
2298static VALUE
2299rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2300{
2301 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2302
2303 if (!target_th->pending_interrupt_queue) {
2304 return Qfalse;
2305 }
2306 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2307 return Qfalse;
2308 }
2309 if (rb_check_arity(argc, 0, 1)) {
2310 VALUE err = argv[0];
2311 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2312 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2313 }
2314 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2315 }
2316 else {
2317 return Qtrue;
2318 }
2319}
2320
2321/*
2322 * call-seq:
2323 * Thread.pending_interrupt?(error = nil) -> true/false
2324 *
2325 * Returns whether the current thread has any deferred asynchronous events queued.
2326 *
2327 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2328 * this method can be used to determine if there are any deferred events.
2329 *
2330 * If this method returns true, you may finish the +:never+ block.
2331 *
2332 * For example, the following method processes deferred asynchronous events
2333 * immediately.
2334 *
2335 * def Thread.kick_interrupt_immediately
2336 * Thread.handle_interrupt(Object => :immediate) {
2337 * Thread.pass
2338 * }
2339 * end
2340 *
2341 * If +error+ is given, then check only for +error+ type deferred events.
2342 *
2343 * === Usage
2344 *
2345 * th = Thread.new{
2346 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2347 * while true
2348 * ...
2349 * # reach safe point to invoke interrupt
2350 * if Thread.pending_interrupt?
2351 * Thread.handle_interrupt(Object => :immediate){}
2352 * end
2353 * ...
2354 * end
2355 * }
2356 * }
2357 * ...
2358 * th.raise # stop thread
2359 *
2360 * This example can also be written as follows, which you should use to
2361 * avoid asynchronous interrupts.
2362 *
2363 * flag = true
2364 * th = Thread.new{
2365 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2366 * while true
2367 * ...
2368 * # reach safe point to invoke interrupt
2369 * break if flag == false
2370 * ...
2371 * end
2372 * }
2373 * }
2374 * ...
2375 * flag = false # stop thread
2376 */
2377
2378static VALUE
2379rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2380{
2381 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2382}
2383
2384NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2385
2386static void
2387rb_threadptr_to_kill(rb_thread_t *th)
2388{
2389 rb_threadptr_pending_interrupt_clear(th);
2390 th->status = THREAD_RUNNABLE;
2391 th->to_kill = 1;
2392 th->ec->errinfo = INT2FIX(TAG_FATAL);
2393 EC_JUMP_TAG(th->ec, TAG_FATAL);
2394}
2395
2396static inline rb_atomic_t
2397threadptr_get_interrupts(rb_thread_t *th)
2398{
2399 rb_execution_context_t *ec = th->ec;
2400 rb_atomic_t interrupt;
2401 rb_atomic_t old;
2402
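    /* Atomically consume the unmasked interrupt bits: the CAS stores
     * back only the masked bits (which stay pending) and the loop
     * retries if another thread modified the flag concurrently. */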
2403 do {
2404 interrupt = ec->interrupt_flag;
2405 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2406 } while (old != interrupt);
2407 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2408}
2409
2410int
2411rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2412{
2413 rb_atomic_t interrupt;
2414 int postponed_job_interrupt = 0;
2415 int ret = FALSE;
2416
2417 if (th->ec->raised_flag) return ret;
2418
2419 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2420 int sig;
2421 int timer_interrupt;
2422 int pending_interrupt;
2423 int trap_interrupt;
2424 int terminate_interrupt;
2425
2426 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2427 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2428 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2429 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2430 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2431
2432 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
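            /* briefly acquire and release the VM lock so this thread
             * blocks until a pending VM barrier has completed */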
2433 RB_VM_LOCK_ENTER();
2434 RB_VM_LOCK_LEAVE();
2435 }
2436
2437 if (postponed_job_interrupt) {
2438 rb_postponed_job_flush(th->vm);
2439 }
2440
2441 /* signal handling */
2442 if (trap_interrupt && (th == th->vm->ractor.main_thread)) {
2443 enum rb_thread_status prev_status = th->status;
2444
2445 th->status = THREAD_RUNNABLE;
2446 {
2447 while ((sig = rb_get_next_signal()) != 0) {
2448 ret |= rb_signal_exec(th, sig);
2449 }
2450 }
2451 th->status = prev_status;
2452 }
2453
2454 /* exception from another thread */
2455 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2456 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2457 RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2458 ret = TRUE;
2459
2460 if (UNDEF_P(err)) {
2461 /* no error */
2462 }
2463 else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2464 err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2465 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2466 terminate_interrupt = 1;
2467 }
2468 else {
2469 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2470                /* the only special exception to be queued across threads */
2471 err = ruby_vm_special_exception_copy(err);
2472 }
2473            /* set runnable if th was sleeping. */
2474 if (th->status == THREAD_STOPPED ||
2475 th->status == THREAD_STOPPED_FOREVER)
2476 th->status = THREAD_RUNNABLE;
2477 rb_exc_raise(err);
2478 }
2479 }
2480
2481 if (terminate_interrupt) {
2482 rb_threadptr_to_kill(th);
2483 }
2484
2485 if (timer_interrupt) {
2486 uint32_t limits_us = TIME_QUANTUM_USEC;
2487
2488 if (th->priority > 0)
2489 limits_us <<= th->priority;
2490 else
2491 limits_us >>= -th->priority;
2492
2493 if (th->status == THREAD_RUNNABLE)
2494 th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2495
2496 VM_ASSERT(th->ec->cfp);
2497 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2498 0, 0, 0, Qundef);
2499
2500 rb_thread_schedule_limits(limits_us);
2501 }
2502 }
2503 return ret;
2504}
2505
2506void
2507rb_thread_execute_interrupts(VALUE thval)
2508{
2509 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2510}
2511
2512static void
2513rb_threadptr_ready(rb_thread_t *th)
2514{
2515 rb_threadptr_interrupt(th);
2516}
2517
2518static VALUE
2519rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2520{
2521 VALUE exc;
2522
2523 if (rb_threadptr_dead(target_th)) {
2524 return Qnil;
2525 }
2526
2527 if (argc == 0) {
2528 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2529 }
2530 else {
2531 exc = rb_make_exception(argc, argv);
2532 }
2533
2534 /* making an exception object can switch thread,
2535 so we need to check thread deadness again */
2536 if (rb_threadptr_dead(target_th)) {
2537 return Qnil;
2538 }
2539
2540 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2541 rb_threadptr_pending_interrupt_enque(target_th, exc);
2542 rb_threadptr_interrupt(target_th);
2543 return Qnil;
2544}
2545
2546void
2547rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2548{
2549 VALUE argv[2];
2550
2551 argv[0] = rb_eSignal;
2552 argv[1] = INT2FIX(sig);
2553 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2554}
2555
2556void
2557rb_threadptr_signal_exit(rb_thread_t *th)
2558{
2559 VALUE argv[2];
2560
2561 argv[0] = rb_eSystemExit;
2562 argv[1] = rb_str_new2("exit");
2563
2564    // TODO: check signal raise delivery
2565 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2566}
2567
2568int
2569rb_ec_set_raised(rb_execution_context_t *ec)
2570{
2571 if (ec->raised_flag & RAISED_EXCEPTION) {
2572 return 1;
2573 }
2574 ec->raised_flag |= RAISED_EXCEPTION;
2575 return 0;
2576}
2577
2578int
2579rb_ec_reset_raised(rb_execution_context_t *ec)
2580{
2581 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2582 return 0;
2583 }
2584 ec->raised_flag &= ~RAISED_EXCEPTION;
2585 return 1;
2586}
2587
2588int
2589rb_notify_fd_close(int fd, struct rb_io_close_wait_list *busy)
2590{
2591 rb_vm_t *vm = GET_THREAD()->vm;
2592 struct waiting_fd *wfd = 0, *next;
2593 ccan_list_head_init(&busy->pending_fd_users);
2594 int has_any;
2595 VALUE wakeup_mutex;
2596
2597 RB_VM_LOCK_ENTER();
2598 {
2599 ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2600 if (wfd->fd == fd) {
2601 rb_thread_t *th = wfd->th;
2602 VALUE err;
2603
2604 ccan_list_del(&wfd->wfd_node);
2605 ccan_list_add(&busy->pending_fd_users, &wfd->wfd_node);
2606
2607 wfd->busy = busy;
2608 err = th->vm->special_exceptions[ruby_error_stream_closed];
2609 rb_threadptr_pending_interrupt_enque(th, err);
2610 rb_threadptr_interrupt(th);
2611 }
2612 }
2613 }
2614
2615 has_any = !ccan_list_empty(&busy->pending_fd_users);
2616 busy->closing_thread = rb_thread_current();
2617 busy->closing_fiber = rb_fiber_current();
2618 wakeup_mutex = Qnil;
2619 if (has_any) {
2620 wakeup_mutex = rb_mutex_new();
2621 RBASIC_CLEAR_CLASS(wakeup_mutex); /* hide from ObjectSpace */
2622 }
2623 busy->wakeup_mutex = wakeup_mutex;
2624
2625 RB_VM_LOCK_LEAVE();
2626
2627 /* If the caller didn't pass *busy as a pointer to something on the stack,
2628 we need to guard this mutex object on _our_ C stack for the duration
2629 of this function. */
2630 RB_GC_GUARD(wakeup_mutex);
2631 return has_any;
2632}
2633
2634void
2635rb_notify_fd_close_wait(struct rb_io_close_wait_list *busy)
2636{
2637 if (!RB_TEST(busy->wakeup_mutex)) {
2638 /* There was nobody else using this file when we closed it, so we
2639           never bothered to allocate a mutex */
2640 return;
2641 }
2642
2643 rb_mutex_lock(busy->wakeup_mutex);
2644 while (!ccan_list_empty(&busy->pending_fd_users)) {
2645 rb_mutex_sleep(busy->wakeup_mutex, Qnil);
2646 }
2647 rb_mutex_unlock(busy->wakeup_mutex);
2648}
2649
2650void
2651rb_thread_fd_close(int fd)
2652{
2653 struct rb_io_close_wait_list busy;
2654
2655 if (rb_notify_fd_close(fd, &busy)) {
2656 rb_notify_fd_close_wait(&busy);
2657 }
2658}
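/* A Ruby-level illustration (not part of this file): closing an IO that
 * another thread is blocked on wakes the blocked thread with IOError
 * once every pending user of the fd has been notified.
 *
 *   r, w = IO.pipe
 *   t = Thread.new { r.read }
 *   Thread.pass until t.status == "sleep"
 *   r.close                 # notifies t, then waits for it to leave the fd
 *   begin
 *     t.join
 *   rescue IOError => e
 *     puts e.message        # => "stream closed in another thread"
 *   end
 */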
2659
2660/*
2661 * call-seq:
2662 * thr.raise
2663 * thr.raise(string)
2664 * thr.raise(exception [, string [, array]])
2665 *
2666 * Raises an exception from the given thread. The caller does not have to be
2667 * +thr+. See Kernel#raise for more information.
2668 *
2669 * Thread.abort_on_exception = true
2670 * a = Thread.new { sleep(200) }
2671 * a.raise("Gotcha")
2672 *
2673 * This will produce:
2674 *
2675 * prog.rb:3: Gotcha (RuntimeError)
2676 * from prog.rb:2:in `initialize'
2677 * from prog.rb:2:in `new'
2678 * from prog.rb:2
2679 */
2680
2681static VALUE
2682thread_raise_m(int argc, VALUE *argv, VALUE self)
2683{
2684 rb_thread_t *target_th = rb_thread_ptr(self);
2685 const rb_thread_t *current_th = GET_THREAD();
2686
2687 threadptr_check_pending_interrupt_queue(target_th);
2688 rb_threadptr_raise(target_th, argc, argv);
2689
2690 /* To perform Thread.current.raise as Kernel.raise */
2691 if (current_th == target_th) {
2692 RUBY_VM_CHECK_INTS(target_th->ec);
2693 }
2694 return Qnil;
2695}
2696
2697
2698/*
2699 * call-seq:
2700 * thr.exit -> thr
2701 * thr.kill -> thr
2702 * thr.terminate -> thr
2703 *
2704 * Terminates +thr+ and schedules another thread to be run, returning
2705 * the terminated Thread. If this is the main thread, or the last
2706 * thread, exits the process.
2707 */
2708
2709VALUE
2710rb_thread_kill(VALUE thread)
2711{
2712 rb_thread_t *target_th = rb_thread_ptr(thread);
2713
2714 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2715 return thread;
2716 }
2717 if (target_th == target_th->vm->ractor.main_thread) {
2718 rb_exit(EXIT_SUCCESS);
2719 }
2720
2721 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2722
2723 if (target_th == GET_THREAD()) {
2724 /* kill myself immediately */
2725 rb_threadptr_to_kill(target_th);
2726 }
2727 else {
2728 threadptr_check_pending_interrupt_queue(target_th);
2729 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2730 rb_threadptr_interrupt(target_th);
2731 }
2732
2733 return thread;
2734}
2735
2736int
2737rb_thread_to_be_killed(VALUE thread)
2738{
2739 rb_thread_t *target_th = rb_thread_ptr(thread);
2740
2741 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2742 return TRUE;
2743 }
2744 return FALSE;
2745}
2746
2747/*
2748 * call-seq:
2749 * Thread.kill(thread) -> thread
2750 *
2751 * Causes the given +thread+ to exit, see also Thread::exit.
2752 *
2753 * count = 0
2754 * a = Thread.new { loop { count += 1 } }
2755 * sleep(0.1) #=> 0
2756 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2757 * count #=> 93947
2758 * a.alive? #=> false
2759 */
2760
2761static VALUE
2762rb_thread_s_kill(VALUE obj, VALUE th)
2763{
2764 return rb_thread_kill(th);
2765}
2766
2767
2768/*
2769 * call-seq:
2770 * Thread.exit -> thread
2771 *
2772 * Terminates the currently running thread and schedules another thread to be
2773 * run.
2774 *
2775 * If this thread is already marked to be killed, ::exit returns the Thread.
2776 *
2777 * If this is the main thread, or the last thread, exits the process.
2778 */
2779
2780static VALUE
2781rb_thread_exit(VALUE _)
2782{
2783 rb_thread_t *th = GET_THREAD();
2784 return rb_thread_kill(th->self);
2785}
2786
2787
2788/*
2789 * call-seq:
2790 * thr.wakeup -> thr
2791 *
2792 * Marks a given thread as eligible for scheduling; however, it may still
2793 * remain blocked on I/O.
2794 *
2795 * *Note:* This does not invoke the scheduler; see #run for more information.
2796 *
2797 * c = Thread.new { Thread.stop; puts "hey!" }
2798 * sleep 0.1 while c.status!='sleep'
2799 * c.wakeup
2800 * c.join
2801 * #=> "hey!"
2802 */
2803
2804VALUE
2805rb_thread_wakeup(VALUE thread)
2806{
2807 if (!RTEST(rb_thread_wakeup_alive(thread))) {
2808 rb_raise(rb_eThreadError, "killed thread");
2809 }
2810 return thread;
2811}
2812
2813VALUE
2814rb_thread_wakeup_alive(VALUE thread)
2815{
2816 rb_thread_t *target_th = rb_thread_ptr(thread);
2817 if (target_th->status == THREAD_KILLED) return Qnil;
2818
2819 rb_threadptr_ready(target_th);
2820
2821 if (target_th->status == THREAD_STOPPED ||
2822 target_th->status == THREAD_STOPPED_FOREVER) {
2823 target_th->status = THREAD_RUNNABLE;
2824 }
2825
2826 return thread;
2827}
2828
2829
2830/*
2831 * call-seq:
2832 * thr.run -> thr
2833 *
2834 * Wakes up +thr+, making it eligible for scheduling.
2835 *
2836 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2837 * sleep 0.1 while a.status!='sleep'
2838 * puts "Got here"
2839 * a.run
2840 * a.join
2841 *
2842 * This will produce:
2843 *
2844 * a
2845 * Got here
2846 * c
2847 *
2848 * See also the instance method #wakeup.
2849 */
2850
2851VALUE
2852rb_thread_run(VALUE thread)
2853{
2854 rb_thread_wakeup(thread);
2855    rb_thread_schedule();
2856 return thread;
2857}
2858
2859
2860VALUE
2861rb_thread_stop(void)
2862{
2863 if (rb_thread_alone()) {
2864 rb_raise(rb_eThreadError,
2865 "stopping only thread\n\tnote: use sleep to stop forever");
2866 }
2867    rb_thread_sleep_deadly();
2868 return Qnil;
2869}
2870
2871/*
2872 * call-seq:
2873 * Thread.stop -> nil
2874 *
2875 * Stops execution of the current thread, putting it into a ``sleep'' state,
2876 * and schedules execution of another thread.
2877 *
2878 * a = Thread.new { print "a"; Thread.stop; print "c" }
2879 * sleep 0.1 while a.status!='sleep'
2880 * print "b"
2881 * a.run
2882 * a.join
2883 * #=> "abc"
2884 */
2885
2886static VALUE
2887thread_stop(VALUE _)
2888{
2889 return rb_thread_stop();
2890}
2891
2892/********************************************************************/
2893
2894VALUE
2895rb_thread_list(void)
2896{
2897 // TODO
2898 return rb_ractor_thread_list();
2899}
2900
2901/*
2902 * call-seq:
2903 * Thread.list -> array
2904 *
2905 * Returns an array of Thread objects for all threads that are either runnable
2906 * or stopped.
2907 *
2908 * Thread.new { sleep(200) }
2909 * Thread.new { 1000000.times {|i| i*i } }
2910 * Thread.new { Thread.stop }
2911 * Thread.list.each {|t| p t}
2912 *
2913 * This will produce:
2914 *
2915 * #<Thread:0x401b3e84 sleep>
2916 * #<Thread:0x401b3f38 run>
2917 * #<Thread:0x401b3fb0 sleep>
2918 * #<Thread:0x401bdf4c run>
2919 */
2920
2921static VALUE
2922thread_list(VALUE _)
2923{
2924 return rb_thread_list();
2925}
2926
2927VALUE
2928rb_thread_current(void)
2929{
2930 return GET_THREAD()->self;
2931}
2932
2933/*
2934 * call-seq:
2935 * Thread.current -> thread
2936 *
2937 * Returns the currently executing thread.
2938 *
2939 * Thread.current #=> #<Thread:0x401bdf4c run>
2940 */
2941
2942static VALUE
2943thread_s_current(VALUE klass)
2944{
2945 return rb_thread_current();
2946}
2947
2948VALUE
2949rb_thread_main(void)
2950{
2951 return GET_RACTOR()->threads.main->self;
2952}
2953
2954/*
2955 * call-seq:
2956 * Thread.main -> thread
2957 *
2958 * Returns the main thread.
2959 */
2960
2961static VALUE
2962rb_thread_s_main(VALUE klass)
2963{
2964 return rb_thread_main();
2965}
2966
2967
2968/*
2969 * call-seq:
2970 * Thread.abort_on_exception -> true or false
2971 *
2972 * Returns the status of the global ``abort on exception'' condition.
2973 *
2974 * The default is +false+.
2975 *
2976 * When set to +true+, if any thread is aborted by an exception, the
2977 * raised exception will be re-raised in the main thread.
2978 *
2979 * Can also be specified by the global $DEBUG flag or command line option
2980 * +-d+.
2981 *
2982 * See also ::abort_on_exception=.
2983 *
2984 * There is also an instance level method to set this for a specific thread,
2985 * see #abort_on_exception.
2986 */
2987
2988static VALUE
2989rb_thread_s_abort_exc(VALUE _)
2990{
2991 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
2992}
2993
2994
2995/*
2996 * call-seq:
2997 * Thread.abort_on_exception= boolean -> true or false
2998 *
2999 * When set to +true+, if any thread is aborted by an exception, the
3000 * raised exception will be re-raised in the main thread.
3001 * Returns the new state.
3002 *
3003 * Thread.abort_on_exception = true
3004 * t1 = Thread.new do
3005 * puts "In new thread"
3006 * raise "Exception from thread"
3007 * end
3008 * sleep(1)
3009 * puts "not reached"
3010 *
3011 * This will produce:
3012 *
3013 * In new thread
3014 * prog.rb:4: Exception from thread (RuntimeError)
3015 * from prog.rb:2:in `initialize'
3016 * from prog.rb:2:in `new'
3017 * from prog.rb:2
3018 *
3019 * See also ::abort_on_exception.
3020 *
3021 * There is also an instance level method to set this for a specific thread,
3022 * see #abort_on_exception=.
3023 */
3024
3025static VALUE
3026rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3027{
3028 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3029 return val;
3030}
3031
3032
3033/*
3034 * call-seq:
3035 * thr.abort_on_exception -> true or false
3036 *
3037 * Returns the status of the thread-local ``abort on exception'' condition for
3038 * this +thr+.
3039 *
3040 * The default is +false+.
3041 *
3042 * See also #abort_on_exception=.
3043 *
3044 * There is also a class level method to set this for all threads, see
3045 * ::abort_on_exception.
3046 */
3047
3048static VALUE
3049rb_thread_abort_exc(VALUE thread)
3050{
3051 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3052}
3053
3054
3055/*
3056 * call-seq:
3057 * thr.abort_on_exception= boolean -> true or false
3058 *
3059 * When set to +true+, if this +thr+ is aborted by an exception, the
3060 * raised exception will be re-raised in the main thread.
3061 *
3062 * See also #abort_on_exception.
3063 *
3064 * There is also a class level method to set this for all threads, see
3065 * ::abort_on_exception=.
3066 */
3067
3068static VALUE
3069rb_thread_abort_exc_set(VALUE thread, VALUE val)
3070{
3071 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3072 return val;
3073}
3074
3075
3076/*
3077 * call-seq:
3078 * Thread.report_on_exception -> true or false
3079 *
3080 * Returns the status of the global ``report on exception'' condition.
3081 *
3082 * The default is +true+ since Ruby 2.5.
3083 *
3084 * All threads created when this flag is true will report
3085 * a message on $stderr if an exception kills the thread.
3086 *
3087 * Thread.new { 1.times { raise } }
3088 *
3089 * will produce this output on $stderr:
3090 *
3091 * #<Thread:...> terminated with exception (report_on_exception is true):
3092 * Traceback (most recent call last):
3093 * 2: from -e:1:in `block in <main>'
3094 * 1: from -e:1:in `times'
3095 *
3096 * This is done to catch errors in threads early.
3097 * In some cases, you might not want this output.
3098 * There are multiple ways to avoid the extra output:
3099 *
3100 * * If the exception is not intended, the best approach is to fix the cause of
3101 * the exception so it does not happen anymore.
3102 * * If the exception is intended, it might be better to rescue it closer to
3103 *   where it is raised rather than let it kill the Thread.
3104 * * If it is guaranteed the Thread will be joined with Thread#join or
3105 * Thread#value, then it is safe to disable this report with
3106 * <code>Thread.current.report_on_exception = false</code>
3107 * when starting the Thread.
3108 * However, this might handle the exception much later, or not at all
3109 * if the Thread is never joined due to the parent thread being blocked, etc.
3110 *
3111 * See also ::report_on_exception=.
3112 *
3113 * There is also an instance level method to set this for a specific thread,
3114 * see #report_on_exception=.
3115 *
3116 */
3117
3118static VALUE
3119rb_thread_s_report_exc(VALUE _)
3120{
3121 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3122}
3123
3124
3125/*
3126 * call-seq:
3127 * Thread.report_on_exception= boolean -> true or false
3128 *
3129 * Returns the new state.
3130 * When set to +true+, all threads created afterwards will inherit the
3131 * condition and report a message on $stderr if an exception kills a thread:
3132 *
3133 * Thread.report_on_exception = true
3134 * t1 = Thread.new do
3135 * puts "In new thread"
3136 * raise "Exception from thread"
3137 * end
3138 * sleep(1)
3139 * puts "In the main thread"
3140 *
3141 * This will produce:
3142 *
3143 * In new thread
3144 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3145 * Traceback (most recent call last):
3146 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3147 * In the main thread
3148 *
3149 * See also ::report_on_exception.
3150 *
3151 * There is also an instance level method to set this for a specific thread,
3152 * see #report_on_exception=.
3153 */
3154
3155static VALUE
3156rb_thread_s_report_exc_set(VALUE self, VALUE val)
3157{
3158 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3159 return val;
3160}
3161
3162
3163/*
3164 * call-seq:
3165 * Thread.ignore_deadlock -> true or false
3166 *
3167 * Returns the status of the global ``ignore deadlock'' condition.
3168 * The default is +false+, so that deadlock conditions are not ignored.
3169 *
3170 * See also ::ignore_deadlock=.
3171 *
3172 */
3173
3174static VALUE
3175rb_thread_s_ignore_deadlock(VALUE _)
3176{
3177 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3178}
3179
3180
3181/*
3182 * call-seq:
3183 * Thread.ignore_deadlock = boolean -> true or false
3184 *
3185 * Returns the new state.
3186 * When set to +true+, the VM will not check for deadlock conditions.
3187 * It is only useful to set this if your application can break a
3188 * deadlock condition via some other means, such as a signal.
3189 *
3190 * Thread.ignore_deadlock = true
3191 * queue = Thread::Queue.new
3192 *
3193 * trap(:SIGUSR1){queue.push "Received signal"}
3194 *
3195 * # raises fatal error unless ignoring deadlock
3196 * puts queue.pop
3197 *
3198 * See also ::ignore_deadlock.
3199 */
3200
3201static VALUE
3202rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3203{
3204 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3205 return val;
3206}
3207
3208
3209/*
3210 * call-seq:
3211 * thr.report_on_exception -> true or false
3212 *
3213 * Returns the status of the thread-local ``report on exception'' condition for
3214 * this +thr+.
3215 *
3216 * The default value when creating a Thread is the value of
3217 * the global flag Thread.report_on_exception.
3218 *
3219 * See also #report_on_exception=.
3220 *
3221 * There is also a class level method to set this for all new threads, see
3222 * ::report_on_exception=.
3223 */
3224
3225static VALUE
3226rb_thread_report_exc(VALUE thread)
3227{
3228 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3229}
3230
3231
3232/*
3233 * call-seq:
3234 * thr.report_on_exception= boolean -> true or false
3235 *
3236 * When set to +true+, a message is printed on $stderr if an exception
3237 * kills this +thr+. See ::report_on_exception for details.
3238 *
3239 * See also #report_on_exception.
3240 *
3241 * There is also a class level method to set this for all new threads, see
3242 * ::report_on_exception=.
3243 */
3244
3245static VALUE
3246rb_thread_report_exc_set(VALUE thread, VALUE val)
3247{
3248 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3249 return val;
3250}
3251
3252
3253/*
3254 * call-seq:
3255 * thr.group -> thgrp or nil
3256 *
3257 * Returns the ThreadGroup which contains the given thread.
3258 *
3259 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3260 */
3261
3262VALUE
3263rb_thread_group(VALUE thread)
3264{
3265 return rb_thread_ptr(thread)->thgroup;
3266}
3267
3268static const char *
3269thread_status_name(rb_thread_t *th, int detail)
3270{
3271 switch (th->status) {
3272 case THREAD_RUNNABLE:
3273 return th->to_kill ? "aborting" : "run";
3274 case THREAD_STOPPED_FOREVER:
3275 if (detail) return "sleep_forever";
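        /* fall through */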
3276 case THREAD_STOPPED:
3277 return "sleep";
3278 case THREAD_KILLED:
3279 return "dead";
3280 default:
3281 return "unknown";
3282 }
3283}
3284
3285static int
3286rb_threadptr_dead(rb_thread_t *th)
3287{
3288 return th->status == THREAD_KILLED;
3289}
3290
3291
3292/*
3293 * call-seq:
3294 * thr.status -> string, false or nil
3295 *
3296 * Returns the status of +thr+.
3297 *
3298 * [<tt>"sleep"</tt>]
3299 * Returned if this thread is sleeping or waiting on I/O
3300 * [<tt>"run"</tt>]
3301 * When this thread is executing
3302 * [<tt>"aborting"</tt>]
3303 * If this thread is aborting
3304 * [+false+]
3305 * When this thread is terminated normally
3306 * [+nil+]
3307 * If terminated with an exception.
3308 *
3309 * a = Thread.new { raise("die now") }
3310 * b = Thread.new { Thread.stop }
3311 * c = Thread.new { Thread.exit }
3312 * d = Thread.new { sleep }
3313 * d.kill #=> #<Thread:0x401b3678 aborting>
3314 * a.status #=> nil
3315 * b.status #=> "sleep"
3316 * c.status #=> false
3317 * d.status #=> "aborting"
3318 * Thread.current.status #=> "run"
3319 *
3320 * See also the instance methods #alive? and #stop?
3321 */
3322
3323static VALUE
3324rb_thread_status(VALUE thread)
3325{
3326 rb_thread_t *target_th = rb_thread_ptr(thread);
3327
3328 if (rb_threadptr_dead(target_th)) {
3329 if (!NIL_P(target_th->ec->errinfo) &&
3330 !FIXNUM_P(target_th->ec->errinfo)) {
3331 return Qnil;
3332 }
3333 else {
3334 return Qfalse;
3335 }
3336 }
3337 else {
3338 return rb_str_new2(thread_status_name(target_th, FALSE));
3339 }
3340}
3341
3342
3343/*
3344 * call-seq:
3345 * thr.alive? -> true or false
3346 *
3347 * Returns +true+ if +thr+ is running or sleeping.
3348 *
3349 * thr = Thread.new { }
3350 * thr.join #=> #<Thread:0x401b3fb0 dead>
3351 * Thread.current.alive? #=> true
3352 * thr.alive? #=> false
3353 *
3354 * See also #stop? and #status.
3355 */
3356
3357static VALUE
3358rb_thread_alive_p(VALUE thread)
3359{
3360 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3361}
3362
3363/*
3364 * call-seq:
3365 * thr.stop? -> true or false
3366 *
3367 * Returns +true+ if +thr+ is dead or sleeping.
3368 *
3369 * a = Thread.new { Thread.stop }
3370 * b = Thread.current
3371 * a.stop? #=> true
3372 * b.stop? #=> false
3373 *
3374 * See also #alive? and #status.
3375 */
3376
3377static VALUE
3378rb_thread_stop_p(VALUE thread)
3379{
3380 rb_thread_t *th = rb_thread_ptr(thread);
3381
3382 if (rb_threadptr_dead(th)) {
3383 return Qtrue;
3384 }
3385 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3386}
3387
3388/*
3389 * call-seq:
3390 * thr.name -> string
3391 *
3392 * Returns the name of the thread.
3393 */
3394
3395static VALUE
3396rb_thread_getname(VALUE thread)
3397{
3398 return rb_thread_ptr(thread)->name;
3399}
3400
3401/*
3402 * call-seq:
3403 * thr.name=(name) -> string
3404 *
3405 * Sets the given name to the Ruby thread.
3406 * On some platforms, it may also set the name of the pthread and/or the kernel thread.
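 *
 * For example (illustrative; OS-level visibility varies by platform):
 *
 *    t = Thread.new { sleep }
 *    t.name = "worker-1"    # may also show up in OS tools such as top(1)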
3407 */
3408
3409static VALUE
3410rb_thread_setname(VALUE thread, VALUE name)
3411{
3412 rb_thread_t *target_th = rb_thread_ptr(thread);
3413
3414 if (!NIL_P(name)) {
3415 rb_encoding *enc;
3416 StringValueCStr(name);
3417 enc = rb_enc_get(name);
3418 if (!rb_enc_asciicompat(enc)) {
3419 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3420 rb_enc_name(enc));
3421 }
3422 name = rb_str_new_frozen(name);
3423 }
3424 target_th->name = name;
3425 if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3426 native_set_another_thread_name(target_th->nt->thread_id, name);
3427 }
3428 return name;
3429}
3430
3431#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3432/*
3433 * call-seq:
3434 * thr.native_thread_id -> integer
3435 *
3436 * Return the native thread ID which is used by the Ruby thread.
3437 *
3438 * The ID depends on the OS. (It is not the POSIX thread ID returned by pthread_self(3).)
3439 * * On Linux it is TID returned by gettid(2).
3440 * * On macOS it is the system-wide unique integral ID of thread returned
3441 * by pthread_threadid_np(3).
3442 * * On FreeBSD it is the unique integral ID of the thread returned by
3443 * pthread_getthreadid_np(3).
3444 * * On Windows it is the thread identifier returned by GetThreadId().
3445 * * On other platforms, it raises NotImplementedError.
3446 *
3447 * NOTE:
3448 * If the thread is not associated yet or already deassociated with a native
3449 * thread, it returns _nil_.
3450 * If the Ruby implementation uses an M:N thread model, the ID may change
3451 * depending on the timing.
3452 */
3453
3454static VALUE
3455rb_thread_native_thread_id(VALUE thread)
3456{
3457 rb_thread_t *target_th = rb_thread_ptr(thread);
3458 if (rb_threadptr_dead(target_th)) return Qnil;
3459 return native_thread_native_thread_id(target_th);
3460}
3461#else
3462# define rb_thread_native_thread_id rb_f_notimplement
3463#endif
3464
3465/*
3466 * call-seq:
3467 * thr.to_s -> string
3468 *
3469 * Dump the name, id, and status of _thr_ to a string.
3470 */
3471
3472static VALUE
3473rb_thread_to_s(VALUE thread)
3474{
3475 VALUE cname = rb_class_path(rb_obj_class(thread));
3476 rb_thread_t *target_th = rb_thread_ptr(thread);
3477 const char *status;
3478 VALUE str, loc;
3479
3480 status = thread_status_name(target_th, TRUE);
3481 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3482 if (!NIL_P(target_th->name)) {
3483 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3484 }
3485 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3486 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3487 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3488 }
3489 rb_str_catf(str, " %s>", status);
3490
3491 return str;
3492}
3493
3494/* variables for recursive traversals */
3495#define recursive_key id__recursive_key__
3496
3497static VALUE
3498threadptr_local_aref(rb_thread_t *th, ID id)
3499{
3500 if (id == recursive_key) {
3501 return th->ec->local_storage_recursive_hash;
3502 }
3503 else {
3504 VALUE val;
3505 struct rb_id_table *local_storage = th->ec->local_storage;
3506
3507 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3508 return val;
3509 }
3510 else {
3511 return Qnil;
3512 }
3513 }
3514}
3515
3516VALUE
3517rb_thread_local_aref(VALUE thread, ID id)
3518{
3519 return threadptr_local_aref(rb_thread_ptr(thread), id);
3520}
3521
3522/*
3523 * call-seq:
3524 * thr[sym] -> obj or nil
3525 *
3526 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3527 * if not explicitly inside a Fiber), using either a symbol or a string name.
3528 * If the specified variable does not exist, returns +nil+.
3529 *
3530 * [
3531 * Thread.new { Thread.current["name"] = "A" },
3532 * Thread.new { Thread.current[:name] = "B" },
3533 * Thread.new { Thread.current["name"] = "C" }
3534 * ].each do |th|
3535 * th.join
3536 * puts "#{th.inspect}: #{th[:name]}"
3537 * end
3538 *
3539 * This will produce:
3540 *
3541 * #<Thread:0x00000002a54220 dead>: A
3542 * #<Thread:0x00000002a541a8 dead>: B
3543 * #<Thread:0x00000002a54130 dead>: C
3544 *
3545 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3546 * This confusion did not exist in Ruby 1.8 because
3547 * fibers are only available since Ruby 1.9.
3548 * Ruby 1.9 made these methods fiber-local in order to preserve
3549 * the following idiom for dynamic scope.
3550 *
3551 * def meth(newvalue)
3552 * begin
3553 * oldvalue = Thread.current[:name]
3554 * Thread.current[:name] = newvalue
3555 * yield
3556 * ensure
3557 * Thread.current[:name] = oldvalue
3558 * end
3559 * end
3560 *
3561 * The idiom may not work as dynamic scope if the methods are thread-local
3562 * and a given block switches fibers.
3563 *
3564 * f = Fiber.new {
3565 * meth(1) {
3566 * Fiber.yield
3567 * }
3568 * }
3569 * meth(2) {
3570 * f.resume
3571 * }
3572 * f.resume
3573 * p Thread.current[:name]
3574 * #=> nil if fiber-local
3575 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3576 *
3577 * For thread-local variables, please see #thread_variable_get and
3578 * #thread_variable_set.
3579 *
3580 */
3581
3582static VALUE
3583rb_thread_aref(VALUE thread, VALUE key)
3584{
3585 ID id = rb_check_id(&key);
3586 if (!id) return Qnil;
3587 return rb_thread_local_aref(thread, id);
3588}
3589
3590/*
3591 * call-seq:
3592 * thr.fetch(sym) -> obj
3593 * thr.fetch(sym) { } -> obj
3594 * thr.fetch(sym, default) -> obj
3595 *
3596 * Returns a fiber-local for the given key. If the key can't be
3597 * found, there are several options: With no other arguments, it will
3598 * raise a KeyError exception; if <i>default</i> is given, then that
3599 * will be returned; if the optional code block is specified, then
3600 * that will be run and its result returned. See Thread#[] and
3601 * Hash#fetch.
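 *
 * For example:
 *
 *    Thread.current[:answer] = 42
 *    Thread.current.fetch(:answer)       #=> 42
 *    Thread.current.fetch(:missing, 0)   #=> 0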
3602 */
3603static VALUE
3604rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3605{
3606 VALUE key, val;
3607 ID id;
3608 rb_thread_t *target_th = rb_thread_ptr(self);
3609 int block_given;
3610
3611 rb_check_arity(argc, 1, 2);
3612 key = argv[0];
3613
3614 block_given = rb_block_given_p();
3615 if (block_given && argc == 2) {
3616 rb_warn("block supersedes default value argument");
3617 }
3618
3619 id = rb_check_id(&key);
3620
3621 if (id == recursive_key) {
3622 return target_th->ec->local_storage_recursive_hash;
3623 }
3624 else if (id && target_th->ec->local_storage &&
3625 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3626 return val;
3627 }
3628 else if (block_given) {
3629 return rb_yield(key);
3630 }
3631 else if (argc == 1) {
3632 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3633 }
3634 else {
3635 return argv[1];
3636 }
3637}
3638
3639static VALUE
3640threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3641{
3642 if (id == recursive_key) {
3643 th->ec->local_storage_recursive_hash = val;
3644 return val;
3645 }
3646 else {
3647 struct rb_id_table *local_storage = th->ec->local_storage;
3648
3649 if (NIL_P(val)) {
3650 if (!local_storage) return Qnil;
3651 rb_id_table_delete(local_storage, id);
3652 return Qnil;
3653 }
3654 else {
3655 if (local_storage == NULL) {
3656 th->ec->local_storage = local_storage = rb_id_table_create(0);
3657 }
3658 rb_id_table_insert(local_storage, id, val);
3659 return val;
3660 }
3661 }
3662}
3663
3664VALUE
3665rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3666{
3667 if (OBJ_FROZEN(thread)) {
3668 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3669 }
3670
3671 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3672}
3673
3674/*
3675 * call-seq:
3676 * thr[sym] = obj -> obj
3677 *
3678 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3679 * using either a symbol or a string.
3680 *
3681 * See also Thread#[].
3682 *
3683 * For thread-local variables, please see #thread_variable_set and
3684 * #thread_variable_get.
3685 */
3686
3687static VALUE
3688rb_thread_aset(VALUE self, VALUE id, VALUE val)
3689{
3690 return rb_thread_local_aset(self, rb_to_id(id), val);
3691}
3692
3693/*
3694 * call-seq:
3695 * thr.thread_variable_get(key) -> obj or nil
3696 *
3697 * Returns the value of a thread local variable that has been set. Note that
3698 * these are different than fiber local values. For fiber local values,
3699 * please see Thread#[] and Thread#[]=.
3700 *
3701 * Thread local values are carried along with threads, and do not respect
3702 * fibers. For example:
3703 *
3704 * Thread.new {
3705 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3706 * Thread.current["foo"] = "bar" # set a fiber local
3707 *
3708 * Fiber.new {
3709 * Fiber.yield [
3710 * Thread.current.thread_variable_get("foo"), # get the thread local
3711 * Thread.current["foo"], # get the fiber local
3712 * ]
3713 * }.resume
3714 * }.join.value # => ['bar', nil]
3715 *
3716 * The value "bar" is returned for the thread local, where nil is returned
3717 * for the fiber local. The fiber is executed in the same thread, so the
3718 * thread local values are available.
3719 */
3720
3721static VALUE
3722rb_thread_variable_get(VALUE thread, VALUE key)
3723{
3724 VALUE locals;
3725
3726 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3727 return Qnil;
3728 }
3729 locals = rb_thread_local_storage(thread);
3730 return rb_hash_aref(locals, rb_to_symbol(key));
3731}
3732
3733/*
3734 * call-seq:
3735 * thr.thread_variable_set(key, value)
3736 *
3737 * Sets a thread local with +key+ to +value+. Note that these are local to
3738 * threads, and not to fibers. Please see Thread#thread_variable_get and
3739 * Thread#[] for more information.
3740 */
3741
3742static VALUE
3743rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3744{
3745 VALUE locals;
3746
3747 if (OBJ_FROZEN(thread)) {
3748 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3749 }
3750
3751 locals = rb_thread_local_storage(thread);
3752 return rb_hash_aset(locals, rb_to_symbol(key), val);
3753}
3754
3755/*
3756 * call-seq:
3757 * thr.key?(sym) -> true or false
3758 *
3759 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3760 * variable.
3761 *
3762 * me = Thread.current
3763 * me[:oliver] = "a"
3764 * me.key?(:oliver) #=> true
3765 * me.key?(:stanley) #=> false
3766 */
3767
3768static VALUE
3769rb_thread_key_p(VALUE self, VALUE key)
3770{
3771 VALUE val;
3772 ID id = rb_check_id(&key);
3773 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3774
3775 if (!id || local_storage == NULL) {
3776 return Qfalse;
3777 }
3778 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3779}
3780
3781static enum rb_id_table_iterator_result
3782thread_keys_i(ID key, VALUE value, void *ary)
3783{
3784 rb_ary_push((VALUE)ary, ID2SYM(key));
3785 return ID_TABLE_CONTINUE;
3786}
3787
3788int
3789rb_thread_alone(void)
3790{
3791 // TODO
3792 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3793}
3794
3795/*
3796 * call-seq:
3797 * thr.keys -> array
3798 *
3799 * Returns an array of the names of the fiber-local variables (as Symbols).
3800 *
3801 * thr = Thread.new do
3802 * Thread.current[:cat] = 'meow'
3803 * Thread.current["dog"] = 'woof'
3804 * end
3805 * thr.join #=> #<Thread:0x401b3f10 dead>
3806 * thr.keys #=> [:dog, :cat]
3807 */
3808
3809static VALUE
3810rb_thread_keys(VALUE self)
3811{
3812 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3813 VALUE ary = rb_ary_new();
3814
3815 if (local_storage) {
3816 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3817 }
3818 return ary;
3819}
3820
3821static int
3822keys_i(VALUE key, VALUE value, VALUE ary)
3823{
3824 rb_ary_push(ary, key);
3825 return ST_CONTINUE;
3826}
3827
3828/*
3829 * call-seq:
3830 * thr.thread_variables -> array
3831 *
3832 * Returns an array of the names of the thread-local variables (as Symbols).
3833 *
3834 * thr = Thread.new do
3835 * Thread.current.thread_variable_set(:cat, 'meow')
3836 * Thread.current.thread_variable_set("dog", 'woof')
3837 * end
3838 * thr.join #=> #<Thread:0x401b3f10 dead>
3839 * thr.thread_variables #=> [:dog, :cat]
3840 *
3841 * Note that these are not fiber local variables. Please see Thread#[] and
3842 * Thread#thread_variable_get for more details.
3843 */
3844
3845static VALUE
3846rb_thread_variables(VALUE thread)
3847{
3848 VALUE locals;
3849 VALUE ary;
3850
3851 ary = rb_ary_new();
3852 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3853 return ary;
3854 }
3855 locals = rb_thread_local_storage(thread);
3856 rb_hash_foreach(locals, keys_i, ary);
3857
3858 return ary;
3859}
3860
3861/*
3862 * call-seq:
3863 * thr.thread_variable?(key) -> true or false
3864 *
3865 * Returns +true+ if the given string (or symbol) exists as a thread-local
3866 * variable.
3867 *
3868 * me = Thread.current
3869 * me.thread_variable_set(:oliver, "a")
3870 * me.thread_variable?(:oliver) #=> true
3871 * me.thread_variable?(:stanley) #=> false
3872 *
3873 * Note that these are not fiber local variables. Please see Thread#[] and
3874 * Thread#thread_variable_get for more details.
3875 */
3876
3877static VALUE
3878rb_thread_variable_p(VALUE thread, VALUE key)
3879{
3880 VALUE locals;
3881
3882 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3883 return Qfalse;
3884 }
3885 locals = rb_thread_local_storage(thread);
3886
3887 return RBOOL(rb_hash_lookup(locals, rb_to_symbol(key)) != Qnil);
3888}
3889
3890/*
3891 * call-seq:
3892 * thr.priority -> integer
3893 *
3894 * Returns the priority of <i>thr</i>. The default is inherited from the
3895 * current thread that created the new thread, or zero for the
3896 * initial main thread; higher-priority threads will run more frequently
3897 * than lower-priority threads (but lower-priority threads can also run).
3898 *
3899 * This is just a hint for the Ruby thread scheduler. It may be ignored on
3900 * some platforms.
3901 *
3902 * Thread.current.priority #=> 0
3903 */
3904
3905static VALUE
3906rb_thread_priority(VALUE thread)
3907{
3908 return INT2NUM(rb_thread_ptr(thread)->priority);
3909}
3910
3911
3912/*
3913 * call-seq:
3914 * thr.priority= integer -> thr
3915 *
3916 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3917 * will run more frequently than lower-priority threads (but lower-priority
3918 * threads can also run).
3919 *
3920 * This is just a hint for the Ruby thread scheduler. It may be ignored on
3921 * some platforms.
3922 *
3923 * count1 = count2 = 0
3924 * a = Thread.new do
3925 * loop { count1 += 1 }
3926 * end
3927 * a.priority = -1
3928 *
3929 * b = Thread.new do
3930 * loop { count2 += 1 }
3931 * end
3932 * b.priority = -2
3933 * sleep 1 #=> 1
3934 * count1 #=> 622504
3935 * count2 #=> 5832
3936 */
3937
3938static VALUE
3939rb_thread_priority_set(VALUE thread, VALUE prio)
3940{
3941 rb_thread_t *target_th = rb_thread_ptr(thread);
3942 int priority;
3943
3944#if USE_NATIVE_THREAD_PRIORITY
3945 target_th->priority = NUM2INT(prio);
3946 native_thread_apply_priority(th);
3947#else
3948 priority = NUM2INT(prio);
3949 if (priority > RUBY_THREAD_PRIORITY_MAX) {
3950 priority = RUBY_THREAD_PRIORITY_MAX;
3951 }
3952 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3953 priority = RUBY_THREAD_PRIORITY_MIN;
3954 }
3955 target_th->priority = (int8_t)priority;
3956#endif
3957 return INT2NUM(target_th->priority);
3958}
3959
3960/* for IO */
3961
3962#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3963
3964/*
3965 * several Unix platforms support file descriptors bigger than FD_SETSIZE
3966 * in select(2) system call.
3967 *
3968 * - Linux 2.2.12 (?)
3969 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3970 * select(2) documents how to allocate fd_set dynamically.
3971 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3972 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3973 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3974 * select(2) documents how to allocate fd_set dynamically.
3975 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3976 * - Solaris 8 has select_large_fdset
3977 * - Mac OS X 10.7 (Lion)
3978 *   select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
3979 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3980 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
3981 *
3982 * When fd_set is not big enough to hold big file descriptors,
3983 * it should be allocated dynamically.
3984 * Note that this assumes fd_set is structured as a bitmap.
3985 *
3986 * rb_fd_init allocates the memory.
3987 * rb_fd_term frees the memory.
3988 * rb_fd_set may re-allocate the bitmap.
3989 *
3990 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
3991 */
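/* A minimal usage sketch of the API described above (illustrative only;
 * `fd` is assumed to be an open descriptor):
 *
 *   rb_fdset_t set;
 *   struct timeval tv = { 1, 0 };
 *   rb_fd_init(&set);
 *   rb_fd_set(fd, &set);      // re-allocates the bitmap if fd >= FD_SETSIZE
 *   if (rb_fd_select(fd + 1, &set, NULL, NULL, &tv) > 0 &&
 *       rb_fd_isset(fd, &set)) {
 *       // fd is readable
 *   }
 *   rb_fd_term(&set);
 */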
3992
3993void
3994rb_fd_init(rb_fdset_t *fds)
3995{
3996 fds->maxfd = 0;
3997 fds->fdset = ALLOC(fd_set);
3998 FD_ZERO(fds->fdset);
3999}
4000
4001void
4002rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4003{
4004 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4005
4006 if (size < sizeof(fd_set))
4007 size = sizeof(fd_set);
4008 dst->maxfd = src->maxfd;
4009 dst->fdset = xmalloc(size);
4010 memcpy(dst->fdset, src->fdset, size);
4011}
4012
4013void
4014rb_fd_term(rb_fdset_t *fds)
4015{
4016 xfree(fds->fdset);
4017 fds->maxfd = 0;
4018 fds->fdset = 0;
4019}
4020
4021void
4022rb_fd_zero(rb_fdset_t *fds)
4023{
4024 if (fds->fdset)
4025 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4026}
4027
4028static void
4029rb_fd_resize(int n, rb_fdset_t *fds)
4030{
4031 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4032 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4033
4034 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4035 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4036
4037 if (m > o) {
4038 fds->fdset = xrealloc(fds->fdset, m);
4039 memset((char *)fds->fdset + o, 0, m - o);
4040 }
4041 if (n >= fds->maxfd) fds->maxfd = n + 1;
4042}
4043
4044void
4045rb_fd_set(int n, rb_fdset_t *fds)
4046{
4047 rb_fd_resize(n, fds);
4048 FD_SET(n, fds->fdset);
4049}
4050
4051void
4052rb_fd_clr(int n, rb_fdset_t *fds)
4053{
4054 if (n >= fds->maxfd) return;
4055 FD_CLR(n, fds->fdset);
4056}
4057
4058int
4059rb_fd_isset(int n, const rb_fdset_t *fds)
4060{
4061 if (n >= fds->maxfd) return 0;
4062 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4063}
4064
4065void
4066rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4067{
4068 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4069
4070 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4071 dst->maxfd = max;
4072 dst->fdset = xrealloc(dst->fdset, size);
4073 memcpy(dst->fdset, src, size);
4074}
4075
4076void
4077rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4078{
4079 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4080
4081 if (size < sizeof(fd_set))
4082 size = sizeof(fd_set);
4083 dst->maxfd = src->maxfd;
4084 dst->fdset = xrealloc(dst->fdset, size);
4085 memcpy(dst->fdset, src->fdset, size);
4086}
4087
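/*
 * select(2) wrapper over rb_fdset_t: each non-NULL set is resized so its
 * bitmap can hold descriptors up to n - 1 before the raw fd_set pointers
 * are handed to select(2).
 */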
4088int
4089rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4090{
4091 fd_set *r = NULL, *w = NULL, *e = NULL;
4092 if (readfds) {
4093 rb_fd_resize(n - 1, readfds);
4094 r = rb_fd_ptr(readfds);
4095 }
4096 if (writefds) {
4097 rb_fd_resize(n - 1, writefds);
4098 w = rb_fd_ptr(writefds);
4099 }
4100 if (exceptfds) {
4101 rb_fd_resize(n - 1, exceptfds);
4102 e = rb_fd_ptr(exceptfds);
4103 }
4104 return select(n, r, w, e, timeout);
4105}
4106
4107#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4108
4109#undef FD_ZERO
4110#undef FD_SET
4111#undef FD_CLR
4112#undef FD_ISSET
4113
4114#define FD_ZERO(f) rb_fd_zero(f)
4115#define FD_SET(i, f) rb_fd_set((i), (f))
4116#define FD_CLR(i, f) rb_fd_clr((i), (f))
4117#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4118
4119#elif defined(_WIN32)
4120
4121void
4122rb_fd_init(rb_fdset_t *set)
4123{
4124 set->capa = FD_SETSIZE;
4125 set->fdset = ALLOC(fd_set);
4126 FD_ZERO(set->fdset);
4127}
4128
4129void
4130rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4131{
4132 rb_fd_init(dst);
4133 rb_fd_dup(dst, src);
4134}
4135
4136void
4137rb_fd_term(rb_fdset_t *set)
4138{
4139 xfree(set->fdset);
4140 set->fdset = NULL;
4141 set->capa = 0;
4142}
4143
4144void
4145rb_fd_set(int fd, rb_fdset_t *set)
4146{
4147 unsigned int i;
4148 SOCKET s = rb_w32_get_osfhandle(fd);
4149
4150 for (i = 0; i < set->fdset->fd_count; i++) {
4151 if (set->fdset->fd_array[i] == s) {
4152 return;
4153 }
4154 }
4155 if (set->fdset->fd_count >= (unsigned)set->capa) {
4156 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4157 set->fdset =
4158 rb_xrealloc_mul_add(
4159 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4160 }
4161 set->fdset->fd_array[set->fdset->fd_count++] = s;
4162}
4163
4164#undef FD_ZERO
4165#undef FD_SET
4166#undef FD_CLR
4167#undef FD_ISSET
4168
4169#define FD_ZERO(f) rb_fd_zero(f)
4170#define FD_SET(i, f) rb_fd_set((i), (f))
4171#define FD_CLR(i, f) rb_fd_clr((i), (f))
4172#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4173
4174#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4175
4176#endif
4177
4178#ifndef rb_fd_no_init
4179#define rb_fd_no_init(fds) (void)(fds)
4180#endif
4181
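/*
 * Decides whether an interrupted or early-returning wait should be retried.
 * A negative *result with EINTR (or ERESTART) is retryable and *result is
 * reset to 0; a zero result is treated as a possible spurious wakeup and
 * retried only while the relative timeout has not yet expired.
 */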
4182static int
4183wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4184{
4185 if (*result < 0) {
4186 switch (errnum) {
4187 case EINTR:
4188#ifdef ERESTART
4189 case ERESTART:
4190#endif
4191 *result = 0;
4192 if (rel && hrtime_update_expire(rel, end)) {
4193 *rel = 0;
4194 }
4195 return TRUE;
4196 }
4197 return FALSE;
4198 }
4199 else if (*result == 0) {
4200 /* check for spurious wakeup */
4201 if (rel) {
4202 return !hrtime_update_expire(rel, end);
4203 }
4204 return TRUE;
4205 }
4206 return FALSE;
4207}
4209struct select_set {
4210 int max;
4211 rb_thread_t *th;
4212 rb_fdset_t *rset;
4213 rb_fdset_t *wset;
4214 rb_fdset_t *eset;
4215 rb_fdset_t orig_rset;
4216 rb_fdset_t orig_wset;
4217 rb_fdset_t orig_eset;
4218 struct timeval *timeout;
4219};
4220
4221static VALUE
4222select_set_free(VALUE p)
4223{
4224 struct select_set *set = (struct select_set *)p;
4225
4226 rb_fd_term(&set->orig_rset);
4227 rb_fd_term(&set->orig_wset);
4228 rb_fd_term(&set->orig_eset);
4229
4230 return Qfalse;
4231}
4232
4233static VALUE
4234do_select(VALUE p)
4235{
4236 struct select_set *set = (struct select_set *)p;
4237 volatile int result = 0;
4238 int lerrno;
4239 rb_hrtime_t *to, rel, end = 0;
4240
4241 timeout_prepare(&to, &rel, &end, set->timeout);
4242 volatile rb_hrtime_t endtime = end;
4243#define restore_fdset(dst, src) \
4244 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4245#define do_select_update() \
4246 (restore_fdset(set->rset, &set->orig_rset), \
4247 restore_fdset(set->wset, &set->orig_wset), \
4248 restore_fdset(set->eset, &set->orig_eset), \
4249 TRUE)
4250
4251 do {
4252 lerrno = 0;
4253
4254 BLOCKING_REGION(set->th, {
4255 struct timeval tv;
4256
4257 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4258 result = native_fd_select(set->max,
4259 set->rset, set->wset, set->eset,
4260 rb_hrtime2timeval(&tv, to), set->th);
4261 if (result < 0) lerrno = errno;
4262 }
4263 }, ubf_select, set->th, TRUE);
4264
4265 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4266 } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());
4267
4268 if (result < 0) {
4269 errno = lerrno;
4270 }
4271
4272 return (VALUE)result;
4273}
4274
4275int
4276rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4277 struct timeval *timeout)
4278{
4279 struct select_set set;
4280
4281 set.th = GET_THREAD();
4282 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4283 set.max = max;
4284 set.rset = read;
4285 set.wset = write;
4286 set.eset = except;
4287 set.timeout = timeout;
4288
4289 if (!set.rset && !set.wset && !set.eset) {
4290 if (!timeout) {
4291 rb_thread_sleep_forever();
4292 return 0;
4293 }
4294 rb_thread_wait_for(*timeout);
4295 return 0;
4296 }
4297
4298#define fd_init_copy(f) do { \
4299 if (set.f) { \
4300 rb_fd_resize(set.max - 1, set.f); \
4301 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4302 rb_fd_init_copy(&set.orig_##f, set.f); \
4303 } \
4304 } \
4305 else { \
4306 rb_fd_no_init(&set.orig_##f); \
4307 } \
4308 } while (0)
4309 fd_init_copy(rset);
4310 fd_init_copy(wset);
4311 fd_init_copy(eset);
4312#undef fd_init_copy
4313
4314 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4315}
4316
4317#ifdef USE_POLL
4318
4319/* The same as the Linux kernel. TODO: make a platform-independent definition. */
4320#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4321#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4322#define POLLEX_SET (POLLPRI)
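/*
 * POLLHUP and POLLERR are folded into the readable/writable masks above so
 * that EOF and error conditions wake up waiters, matching how select(2)
 * marks such descriptors as ready.
 */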
4323
4324#ifndef POLLERR_SET /* defined for FreeBSD for now */
4325# define POLLERR_SET (0)
4326#endif
4327
4328/*
4329 * returns a mask of events
4330 */
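/*
 * Usage sketch from C extension code (illustrative only; "fd" is a
 * hypothetical descriptor): wait up to 1.5 seconds for readability:
 *
 *   struct timeval tv = { 1, 500000 };
 *   int ready = rb_thread_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);
 *   if (ready < 0) rb_sys_fail(0);
 *   if (ready & RB_WAITFD_IN) { ... }
 */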
4331int
4332rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4333{
4334 struct pollfd fds[1];
4335 volatile int result = 0;
4336 nfds_t nfds;
4337 struct waiting_fd wfd;
4338 int state;
4339 volatile int lerrno;
4340
4341 rb_execution_context_t *ec = GET_EC();
4342 rb_thread_t *th = rb_ec_thread_ptr(ec);
4343
4344 if (thread_io_wait_events(th, ec, fd, events, timeout, &wfd)) {
4345 return 0; // timeout
4346 }
4347
4348 thread_io_setup_wfd(th, fd, &wfd);
4349
4350 EC_PUSH_TAG(wfd.th->ec);
4351 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4352 rb_hrtime_t *to, rel, end = 0;
4353 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4354 timeout_prepare(&to, &rel, &end, timeout);
4355 volatile rb_hrtime_t endtime = end;
4356 fds[0].fd = fd;
4357 fds[0].events = (short)events;
4358 fds[0].revents = 0;
4359 do {
4360 nfds = 1;
4361
4362 lerrno = 0;
4363 BLOCKING_REGION(wfd.th, {
4364 struct timespec ts;
4365
4366 if (!RUBY_VM_INTERRUPTED(wfd.th->ec)) {
4367 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4368 if (result < 0) lerrno = errno;
4369 }
4370 }, ubf_select, wfd.th, TRUE);
4371
4372 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4373 } while (wait_retryable(&result, lerrno, to, endtime));
4374 }
4375 EC_POP_TAG();
4376
4377 thread_io_wake_pending_closer(&wfd);
4378
4379 if (state) {
4380 EC_JUMP_TAG(wfd.th->ec, state);
4381 }
4382
4383 if (result < 0) {
4384 errno = lerrno;
4385 return -1;
4386 }
4387
4388 if (fds[0].revents & POLLNVAL) {
4389 errno = EBADF;
4390 return -1;
4391 }
4392
4393 /*
4394 * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits.
4395 * Therefore we need to fix them up.
4396 */
4397 result = 0;
4398 if (fds[0].revents & POLLIN_SET)
4399 result |= RB_WAITFD_IN;
4400 if (fds[0].revents & POLLOUT_SET)
4401 result |= RB_WAITFD_OUT;
4402 if (fds[0].revents & POLLEX_SET)
4403 result |= RB_WAITFD_PRI;
4404
4405 /* report all requested events as ready if there is an error */
4406 if (fds[0].revents & POLLERR_SET)
4407 result |= events;
4408
4409 return result;
4410}
4411#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4412struct select_args {
4413 union {
4414 int fd;
4415 int error;
4416 } as;
4417 rb_fdset_t *read;
4418 rb_fdset_t *write;
4419 rb_fdset_t *except;
4420 struct waiting_fd wfd;
4421 struct timeval *tv;
4422};
4423
4424static VALUE
4425select_single(VALUE ptr)
4426{
4427 struct select_args *args = (struct select_args *)ptr;
4428 int r;
4429
4430 r = rb_thread_fd_select(args->as.fd + 1,
4431 args->read, args->write, args->except, args->tv);
4432 if (r == -1)
4433 args->as.error = errno;
4434 if (r > 0) {
4435 r = 0;
4436 if (args->read && rb_fd_isset(args->as.fd, args->read))
4437 r |= RB_WAITFD_IN;
4438 if (args->write && rb_fd_isset(args->as.fd, args->write))
4439 r |= RB_WAITFD_OUT;
4440 if (args->except && rb_fd_isset(args->as.fd, args->except))
4441 r |= RB_WAITFD_PRI;
4442 }
4443 return (VALUE)r;
4444}
4445
4446static VALUE
4447select_single_cleanup(VALUE ptr)
4448{
4449 struct select_args *args = (struct select_args *)ptr;
4450
4451 thread_io_wake_pending_closer(&args->wfd);
4452 if (args->read) rb_fd_term(args->read);
4453 if (args->write) rb_fd_term(args->write);
4454 if (args->except) rb_fd_term(args->except);
4455
4456 return (VALUE)-1;
4457}
4458
4459static rb_fdset_t *
4460init_set_fd(int fd, rb_fdset_t *fds)
4461{
4462 if (fd < 0) {
4463 return 0;
4464 }
4465 rb_fd_init(fds);
4466 rb_fd_set(fd, fds);
4467
4468 return fds;
4469}
4470
4471int
4472rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4473{
4474 rb_fdset_t rfds, wfds, efds;
4475 struct select_args args;
4476 int r;
4477 VALUE ptr = (VALUE)&args;
4478 rb_execution_context_t *ec = GET_EC();
4479 rb_thread_t *th = rb_ec_thread_ptr(ec);
4480
4481 if (thread_io_wait_events(th, ec, fd, events, timeout, &args.wfd)) {
4482 return 0; // timeout
4483 }
4484
4485 args.as.fd = fd;
4486 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4487 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4488 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4489 args.tv = timeout;
4490 args.wfd.fd = fd;
4491 args.wfd.th = th;
4492 args.wfd.busy = NULL;
4493
4494 RB_VM_LOCK_ENTER();
4495 {
4496 ccan_list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
4497 }
4498 RB_VM_LOCK_LEAVE();
4499
4500 r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4501 if (r == -1)
4502 errno = args.as.error;
4503
4504 return r;
4505}
4506#endif /* ! USE_POLL */
4507
4508/*
4509 * for GC
4510 */
4511
4512#ifdef USE_CONSERVATIVE_STACK_END
4513void
4514rb_gc_set_stack_end(VALUE **stack_end_p)
4515{
4516 VALUE stack_end;
4517 *stack_end_p = &stack_end;
4518}
4519#endif
4520
4521/*
4522 *
4523 */
4524
4525void
4526rb_threadptr_check_signal(rb_thread_t *mth)
4527{
4528 /* mth must be main_thread */
4529 if (rb_signal_buff_size() > 0) {
4530 /* wakeup main thread */
4531 threadptr_trap_interrupt(mth);
4532 }
4533}
4534
4535static void
4536async_bug_fd(const char *mesg, int errno_arg, int fd)
4537{
4538 char buff[64];
4539 size_t n = strlcpy(buff, mesg, sizeof(buff));
4540 if (n < sizeof(buff)-3) {
4541 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4542 }
4543 rb_async_bug_errno(buff, errno_arg);
4544}
4545
4546/* VM-dependent APIs are not available for this function: it drains the wakeup pipe (or eventfd) and returns TRUE if any data was consumed. */
4547static int
4548consume_communication_pipe(int fd)
4549{
4550#if USE_EVENTFD
4551 uint64_t buff[1];
4552#else
4553 /* the buffer can be shared because no one else refers to it. */
4554 static char buff[1024];
4555#endif
4556 ssize_t result;
4557 int ret = FALSE; /* for rb_sigwait_sleep */
4558
4559 while (1) {
4560 result = read(fd, buff, sizeof(buff));
4561#if USE_EVENTFD
4562 RUBY_DEBUG_LOG("result:%d buff:%lu", (int)result, (unsigned long)buff[0]);
4563#else
4564 RUBY_DEBUG_LOG("result:%d", (int)result);
4565#endif
4566 if (result > 0) {
4567 ret = TRUE;
4568 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4569 return ret;
4570 }
4571 }
4572 else if (result == 0) {
4573 return ret;
4574 }
4575 else if (result < 0) {
4576 int e = errno;
4577 switch (e) {
4578 case EINTR:
4579 continue; /* retry */
4580 case EAGAIN:
4581#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4582 case EWOULDBLOCK:
4583#endif
4584 return ret;
4585 default:
4586 async_bug_fd("consume_communication_pipe: read", e, fd);
4587 }
4588 }
4589 }
4590}
4591
4592void
4593rb_thread_stop_timer_thread(void)
4594{
4595 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4596 native_reset_timer_thread();
4597 }
4598}
4599
4600void
4601rb_thread_reset_timer_thread(void)
4602{
4603 native_reset_timer_thread();
4604}
4605
4606void
4607rb_thread_start_timer_thread(void)
4608{
4609 system_working = 1;
4610 rb_thread_create_timer_thread();
4611}
4612
4613static int
4614clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4615{
4616 int i;
4617 VALUE coverage = (VALUE)val;
4618 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4619 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4620
4621 if (lines) {
4622 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4623 rb_ary_clear(lines);
4624 }
4625 else {
4626 int i;
4627 for (i = 0; i < RARRAY_LEN(lines); i++) {
4628 if (RARRAY_AREF(lines, i) != Qnil)
4629 RARRAY_ASET(lines, i, INT2FIX(0));
4630 }
4631 }
4632 }
4633 if (branches) {
4634 VALUE counters = RARRAY_AREF(branches, 1);
4635 for (i = 0; i < RARRAY_LEN(counters); i++) {
4636 RARRAY_ASET(counters, i, INT2FIX(0));
4637 }
4638 }
4639
4640 return ST_CONTINUE;
4641}
4642
4643void
4644rb_clear_coverages(void)
4645{
4646 VALUE coverages = rb_get_coverages();
4647 if (RTEST(coverages)) {
4648 rb_hash_foreach(coverages, clear_coverage_i, 0);
4649 }
4650}
4651
4652#if defined(HAVE_WORKING_FORK)
4653
4654static void
4655rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4656{
4657 rb_thread_t *i = 0;
4658 rb_vm_t *vm = th->vm;
4659 rb_ractor_t *r = th->ractor;
4660 vm->ractor.main_ractor = r;
4661 vm->ractor.main_thread = th;
4662 r->threads.main = th;
4663 r->status_ = ractor_created;
4664
4665 thread_sched_atfork(TH_SCHED(th));
4666 ubf_list_atfork();
4667
4668 // OK. Only this thread is running now, so it can safely iterate over all threads:
4669 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4670 ccan_list_for_each(&r->threads.set, i, lt_node) {
4671 atfork(i, th);
4672 }
4673 }
4674 rb_vm_living_threads_init(vm);
4675
4676 rb_ractor_atfork(vm, th);
4677 rb_vm_postponed_job_atfork();
4678
4679 /* may be held by RJIT threads in parent */
4680 rb_native_mutex_initialize(&vm->workqueue_lock);
4681
4682 /* may be held by any thread in parent */
4683 rb_native_mutex_initialize(&th->interrupt_lock);
4684
4685 vm->fork_gen++;
4686 rb_ractor_sleeper_threads_clear(th->ractor);
4687 rb_clear_coverages();
4688
4689 // restart timer thread (timer threads access `vm->waitpid_lock` and so on).
4690 rb_thread_reset_timer_thread();
4691 rb_thread_start_timer_thread();
4692
4693 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4694 VM_ASSERT(vm->ractor.cnt == 1);
4695}
4696
4697static void
4698terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4699{
4700 if (th != current_th) {
4701 rb_mutex_abandon_keeping_mutexes(th);
4702 rb_mutex_abandon_locking_mutex(th);
4703 thread_cleanup_func(th, TRUE);
4704 }
4705}
4706
4707void rb_fiber_atfork(rb_thread_t *);
4708void
4709rb_thread_atfork(void)
4710{
4711 rb_thread_t *th = GET_THREAD();
4712 rb_threadptr_pending_interrupt_clear(th);
4713 rb_thread_atfork_internal(th, terminate_atfork_i);
4714 th->join_list = NULL;
4715 rb_fiber_atfork(th);
4716
4717 /* We don't want to reproduce CVE-2003-0900. */
4718 rb_reset_random_seed();
4719}
4720
4721static void
4722terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4723{
4724 if (th != current_th) {
4725 thread_cleanup_func_before_exec(th);
4726 }
4727}
4728
4729void
4730rb_thread_atfork_before_exec(void)
4731{
4732 rb_thread_t *th = GET_THREAD();
4733 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4734}
4735#else
4736void
4737rb_thread_atfork(void)
4738{
4739}
4740
4741void
4742rb_thread_atfork_before_exec(void)
4743{
4744}
4745#endif
4747struct thgroup {
4748 int enclosed;
4749};
4750
4751static const rb_data_type_t thgroup_data_type = {
4752 "thgroup",
4753 {
4754 0,
4755 RUBY_TYPED_DEFAULT_FREE,
4756 NULL, // No external memory to report
4757 },
4758 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
4759};
4760
4761/*
4762 * Document-class: ThreadGroup
4763 *
4764 * ThreadGroup provides a means of keeping track of a number of threads as a
4765 * group.
4766 *
4767 * A given Thread object can only belong to one ThreadGroup at a time; adding
4768 * a thread to a new group will remove it from any previous group.
4769 *
4770 * Newly created threads belong to the same group as the thread from which they
4771 * were created.
4772 */
4773
4774/*
4775 * Document-const: Default
4776 *
4777 * The default ThreadGroup created when Ruby starts; all Threads belong to it
4778 * by default.
4779 */
4780static VALUE
4781thgroup_s_alloc(VALUE klass)
4782{
4783 VALUE group;
4784 struct thgroup *data;
4785
4786 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4787 data->enclosed = 0;
4788
4789 return group;
4790}
4791
4792/*
4793 * call-seq:
4794 * thgrp.list -> array
4795 *
4796 * Returns an array of all existing Thread objects that belong to this group.
4797 *
4798 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4799 */
4800
4801static VALUE
4802thgroup_list(VALUE group)
4803{
4804 VALUE ary = rb_ary_new();
4805 rb_thread_t *th = 0;
4806 rb_ractor_t *r = GET_RACTOR();
4807
4808 ccan_list_for_each(&r->threads.set, th, lt_node) {
4809 if (th->thgroup == group) {
4810 rb_ary_push(ary, th->self);
4811 }
4812 }
4813 return ary;
4814}
4815
4816
4817/*
4818 * call-seq:
4819 * thgrp.enclose -> thgrp
4820 *
4821 * Prevents threads from being added to or removed from the receiving
4822 * ThreadGroup.
4823 *
4824 * New threads can still be started in an enclosed ThreadGroup.
4825 *
4826 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4827 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4828 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4829 * tg.add thr
4830 * #=> ThreadError: can't move from the enclosed thread group
4831 */
4832
4833static VALUE
4834thgroup_enclose(VALUE group)
4835{
4836 struct thgroup *data;
4837
4838 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4839 data->enclosed = 1;
4840
4841 return group;
4842}
4843
4844
4845/*
4846 * call-seq:
4847 * thgrp.enclosed? -> true or false
4848 *
4849 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4850 */
4851
4852static VALUE
4853thgroup_enclosed_p(VALUE group)
4854{
4855 struct thgroup *data;
4856
4857 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4858 return RBOOL(data->enclosed);
4859}
4860
4861
4862/*
4863 * call-seq:
4864 * thgrp.add(thread) -> thgrp
4865 *
4866 * Adds the given +thread+ to this group, removing it from any other
4867 * group to which it may have previously been a member.
4868 *
4869 * puts "Initial group is #{ThreadGroup::Default.list}"
4870 * tg = ThreadGroup.new
4871 * t1 = Thread.new { sleep }
4872 * t2 = Thread.new { sleep }
4873 * puts "t1 is #{t1}"
4874 * puts "t2 is #{t2}"
4875 * tg.add(t1)
4876 * puts "Initial group now #{ThreadGroup::Default.list}"
4877 * puts "tg group now #{tg.list}"
4878 *
4879 * This will produce:
4880 *
4881 * Initial group is #<Thread:0x401bdf4c>
4882 * t1 is #<Thread:0x401b3c90>
4883 * t2 is #<Thread:0x401b3c18>
4884 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4885 * tg group now #<Thread:0x401b3c90>
4886 */
4887
4888static VALUE
4889thgroup_add(VALUE group, VALUE thread)
4890{
4891 rb_thread_t *target_th = rb_thread_ptr(thread);
4892 struct thgroup *data;
4893
4894 if (OBJ_FROZEN(group)) {
4895 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4896 }
4897 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4898 if (data->enclosed) {
4899 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4900 }
4901
4902 if (OBJ_FROZEN(target_th->thgroup)) {
4903 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4904 }
4905 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4906 if (data->enclosed) {
4907 rb_raise(rb_eThreadError,
4908 "can't move from the enclosed thread group");
4909 }
4910
4911 target_th->thgroup = group;
4912 return group;
4913}
4914
4915/*
4916 * Document-class: ThreadShield
4917 */
4918static void
4919thread_shield_mark(void *ptr)
4920{
4921 rb_gc_mark((VALUE)ptr);
4922}
4923
4924static const rb_data_type_t thread_shield_data_type = {
4925 "thread_shield",
4926 {thread_shield_mark, 0, 0,},
4927 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4928};
4929
4930static VALUE
4931thread_shield_alloc(VALUE klass)
4932{
4933 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4934}
4935
4936#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4937#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
4938#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4939#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
4940STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
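/*
 * The waiting count is packed into the FL_USER0..FL_USER19 bits of the
 * shield's object flags, so no extra struct field is needed: a count w is
 * stored as (w << THREAD_SHIELD_WAITING_SHIFT) inside
 * THREAD_SHIELD_WAITING_MASK and recovered by shifting right again.
 */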
4941static inline unsigned int
4942rb_thread_shield_waiting(VALUE b)
4943{
4944 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
4945}
4946
4947static inline void
4948rb_thread_shield_waiting_inc(VALUE b)
4949{
4950 unsigned int w = rb_thread_shield_waiting(b);
4951 w++;
4952 if (w > THREAD_SHIELD_WAITING_MAX)
4953 rb_raise(rb_eRuntimeError, "waiting count overflow");
4954 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4955 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4956}
4957
4958static inline void
4959rb_thread_shield_waiting_dec(VALUE b)
4960{
4961 unsigned int w = rb_thread_shield_waiting(b);
4962 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
4963 w--;
4964 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4965 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4966}
4967
4968VALUE
4969rb_thread_shield_new(void)
4970{
4971 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4972 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4973 return thread_shield;
4974}
4975
4976bool
4977rb_thread_shield_owned(VALUE self)
4978{
4979 VALUE mutex = GetThreadShieldPtr(self);
4980 if (!mutex) return false;
4981
4982 rb_mutex_t *m = mutex_ptr(mutex);
4983
4984 return m->fiber == GET_EC()->fiber_ptr;
4985}
4986
4987/*
4988 * Wait on a thread shield.
4989 *
4990 * Returns
4991 * true: acquired the thread shield
4992 * false: the thread shield was destroyed and no other threads are waiting
4993 * nil: the thread shield was destroyed but it is still in use
4994 */
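/*
 * A minimal lifecycle sketch (illustrative only): the creating thread holds
 * the shield; other threads block in rb_thread_shield_wait() until it is
 * released or destroyed.
 *
 *   VALUE shield = rb_thread_shield_new();    // locked by the creator
 *   // ... other threads call rb_thread_shield_wait(shield) ...
 *   rb_thread_shield_destroy(shield);         // wakes all waiters
 */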
4995VALUE
4996rb_thread_shield_wait(VALUE self)
4997{
4998 VALUE mutex = GetThreadShieldPtr(self);
4999 rb_mutex_t *m;
5000
5001 if (!mutex) return Qfalse;
5002 m = mutex_ptr(mutex);
5003 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
5004 rb_thread_shield_waiting_inc(self);
5005 rb_mutex_lock(mutex);
5006 rb_thread_shield_waiting_dec(self);
5007 if (DATA_PTR(self)) return Qtrue;
5008 rb_mutex_unlock(mutex);
5009 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5010}
5011
5012static VALUE
5013thread_shield_get_mutex(VALUE self)
5014{
5015 VALUE mutex = GetThreadShieldPtr(self);
5016 if (!mutex)
5017 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5018 return mutex;
5019}
5020
5021/*
5022 * Release a thread shield, and return true if it has waiting threads.
5023 */
5024VALUE
5025rb_thread_shield_release(VALUE self)
5026{
5027 VALUE mutex = thread_shield_get_mutex(self);
5028 rb_mutex_unlock(mutex);
5029 return RBOOL(rb_thread_shield_waiting(self) > 0);
5030}
5031
5032/*
5033 * Release and destroy a thread shield, and return true if it has waiting threads.
5034 */
5035VALUE
5036rb_thread_shield_destroy(VALUE self)
5037{
5038 VALUE mutex = thread_shield_get_mutex(self);
5039 DATA_PTR(self) = 0;
5040 rb_mutex_unlock(mutex);
5041 return RBOOL(rb_thread_shield_waiting(self) > 0);
5042}
5043
5044static VALUE
5045threadptr_recursive_hash(rb_thread_t *th)
5046{
5047 return th->ec->local_storage_recursive_hash;
5048}
5049
5050static void
5051threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5052{
5053 th->ec->local_storage_recursive_hash = hash;
5054}
5055
5056ID rb_frame_last_func(void);
5057
5058/*
5059 * Returns the current "recursive list" used to detect recursion.
5060 * This list is a hash table, unique for the current thread and for
5061 * the current __callee__.
5062 */
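/*
 * For example, while Object#inspect runs on obj, the per-thread recursive
 * hash may look like { :inspect => { obj => true } }: an identity hash
 * keyed by the current __callee__, whose value is the list used below.
 */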
5063
5064static VALUE
5065recursive_list_access(VALUE sym)
5066{
5067 rb_thread_t *th = GET_THREAD();
5068 VALUE hash = threadptr_recursive_hash(th);
5069 VALUE list;
5070 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5071 hash = rb_ident_hash_new();
5072 threadptr_recursive_hash_set(th, hash);
5073 list = Qnil;
5074 }
5075 else {
5076 list = rb_hash_aref(hash, sym);
5077 }
5078 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5079 list = rb_ident_hash_new();
5080 rb_hash_aset(hash, sym, list);
5081 }
5082 return list;
5083}
5084
5085/*
5086 * Returns Qtrue if and only if obj (or the pair <obj, paired_obj>) is already
5087 * in the recursion list.
5088 * Assumes the recursion list is valid.
5089 */
5090
5091static VALUE
5092recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5093{
5094#if SIZEOF_LONG == SIZEOF_VOIDP
5095 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5096#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5097 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5098 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5099#endif
5100
5101 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5102 if (UNDEF_P(pair_list))
5103 return Qfalse;
5104 if (paired_obj_id) {
5105 if (!RB_TYPE_P(pair_list, T_HASH)) {
5106 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5107 return Qfalse;
5108 }
5109 else {
5110 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5111 return Qfalse;
5112 }
5113 }
5114 return Qtrue;
5115}
5116
5117/*
5118 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5119 * For a single obj, it sets list[obj] to Qtrue.
5120 * For a pair, it sets list[obj] to paired_obj_id if possible,
5121 * otherwise list[obj] becomes a hash like:
5122 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5123 * Assumes the recursion list is valid.
5124 */
5125
5126static void
5127recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5128{
5129 VALUE pair_list;
5130
5131 if (!paired_obj) {
5132 rb_hash_aset(list, obj, Qtrue);
5133 }
5134 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5135 rb_hash_aset(list, obj, paired_obj);
5136 }
5137 else {
5138 if (!RB_TYPE_P(pair_list, T_HASH)){
5139 VALUE other_paired_obj = pair_list;
5140 pair_list = rb_hash_new();
5141 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5142 rb_hash_aset(list, obj, pair_list);
5143 }
5144 rb_hash_aset(pair_list, paired_obj, Qtrue);
5145 }
5146}
5147
5148/*
5149 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5150 * For a pair, if list[obj] is a hash, then paired_obj_id is
5151 * removed from the hash and no attempt is made to simplify
5152 * list[obj] from {only_one_paired_id => true} to only_one_paired_id
5153 * Assumes the recursion list is valid.
5154 */
5155
5156static int
5157recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5158{
5159 if (paired_obj) {
5160 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5161 if (UNDEF_P(pair_list)) {
5162 return 0;
5163 }
5164 if (RB_TYPE_P(pair_list, T_HASH)) {
5165 rb_hash_delete_entry(pair_list, paired_obj);
5166 if (!RHASH_EMPTY_P(pair_list)) {
5167 return 1; /* keep the hash until it is empty */
5168 }
5169 }
5170 }
5171 rb_hash_delete_entry(list, obj);
5172 return 1;
5173}
5175struct exec_recursive_params {
5176 VALUE (*func) (VALUE, VALUE, int);
5177 VALUE list;
5178 VALUE obj;
5179 VALUE pairid;
5180 VALUE arg;
5181};
5182
5183static VALUE
5184exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5185{
5186 struct exec_recursive_params *p = (void *)data;
5187 return (*p->func)(p->obj, p->arg, FALSE);
5188}
5189
5190/*
5191 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5192 * current method is called recursively on obj, or on the pair <obj, pairid>.
5193 * If outer is 0, then the innermost func will be called with recursive set
5194 * to Qtrue, otherwise the outermost func will be called. In the latter case,
5195 * all inner funcs are short-circuited by throw.
5196 * Implementation detail: the value thrown is the recursive list, which is
5197 * specific to the current method and unlikely to be caught anywhere else.
5198 * list[recursive_key] is used as a flag for the outermost call.
5199 */
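/*
 * A hedged usage sketch of the classic recursion guard (names here are
 * illustrative, not from this file):
 *
 *   static VALUE
 *   inspect_guard(VALUE obj, VALUE arg, int recur)
 *   {
 *       if (recur) return rb_str_new_cstr("[...]");
 *       return do_real_inspect(obj, arg);   // hypothetical helper
 *   }
 *
 *   // elsewhere: rb_exec_recursive(inspect_guard, obj, arg);
 */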
5200
5201static VALUE
5202exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5203{
5204 VALUE result = Qundef;
5205 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5206 struct exec_recursive_params p;
5207 int outermost;
5208 p.list = recursive_list_access(sym);
5209 p.obj = obj;
5210 p.pairid = pairid;
5211 p.arg = arg;
5212 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5213
5214 if (recursive_check(p.list, p.obj, pairid)) {
5215 if (outer && !outermost) {
5216 rb_throw_obj(p.list, p.list);
5217 }
5218 return (*func)(obj, arg, TRUE);
5219 }
5220 else {
5221 enum ruby_tag_type state;
5222
5223 p.func = func;
5224
5225 if (outermost) {
5226 recursive_push(p.list, ID2SYM(recursive_key), 0);
5227 recursive_push(p.list, p.obj, p.pairid);
5228 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5229 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5230 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5231 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5232 if (result == p.list) {
5233 result = (*func)(obj, arg, TRUE);
5234 }
5235 }
5236 else {
5237 volatile VALUE ret = Qundef;
5238 recursive_push(p.list, p.obj, p.pairid);
5239 EC_PUSH_TAG(GET_EC());
5240 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5241 ret = (*func)(obj, arg, FALSE);
5242 }
5243 EC_POP_TAG();
5244 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5245 goto invalid;
5246 }
5247 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5248 result = ret;
5249 }
5250 }
5251 *(volatile struct exec_recursive_params *)&p; /* touch p through a volatile lvalue so it is not optimized away */
5252 return result;
5253
5254 invalid:
5255 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5256 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5257 sym, rb_thread_current());
5258 UNREACHABLE_RETURN(Qundef);
5259}
5260
5261/*
5262 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5263 * current method is called recursively on obj
5264 */
5265
5266VALUE
5267rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5268{
5269 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5270}
5271
5272/*
5273 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5274 * current method is called recursively on the ordered pair <obj, paired_obj>
5275 */
5276
5277VALUE
5278rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5279{
5280 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5281}
5282
5283/*
5284 * If recursion is detected on the current method and obj, the outermost
5285 * func will be called with (obj, arg, Qtrue). All inner func will be
5286 * short-circuited using throw.
5287 */
5288
5289VALUE
5290rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5291{
5292 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5293}
5294
5295VALUE
5296rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5297{
5298 return exec_recursive(func, obj, 0, arg, 1, mid);
5299}
5300
5301/*
5302 * If recursion is detected on the current method, obj and paired_obj,
5303 * the outermost func will be called with (obj, arg, Qtrue). All inner
5304 * func will be short-circuited using throw.
5305 */
5306
5307VALUE
5308rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5309{
5310 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5311}
5312
5313/*
5314 * call-seq:
5315 * thread.backtrace -> array or nil
5316 *
5317 * Returns the current backtrace of the target thread.
5318 *
5319 */
5320
5321static VALUE
5322rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5323{
5324 return rb_vm_thread_backtrace(argc, argv, thval);
5325}
5326
5327/* call-seq:
5328 * thread.backtrace_locations(*args) -> array or nil
5329 *
5330 * Returns the execution stack for the target thread---an array containing
5331 * backtrace location objects.
5332 *
5333 * See Thread::Backtrace::Location for more information.
5334 *
5335 * This method behaves similarly to Kernel#caller_locations except it applies
5336 * to a specific thread.
5337 */
5338static VALUE
5339rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5340{
5341 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5342}
5343
5344void
5345Init_Thread_Mutex(void)
5346{
5347 rb_thread_t *th = GET_THREAD();
5348
5349 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5350 rb_native_mutex_initialize(&th->interrupt_lock);
5351}
5352
5353/*
5354 * Document-class: ThreadError
5355 *
5356 * Raised when an invalid operation is attempted on a thread.
5357 *
5358 * For example, when no other thread has been started:
5359 *
5360 * Thread.stop
5361 *
5362 * This will raise the following exception:
5363 *
5364 * ThreadError: stopping only thread
5365 * note: use sleep to stop forever
5366 */
5367
5368void
5369Init_Thread(void)
5370{
5371 VALUE cThGroup;
5372 rb_thread_t *th = GET_THREAD();
5373
5374 sym_never = ID2SYM(rb_intern_const("never"));
5375 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5376 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5377
5378 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5379 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5380 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5381 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5382 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5383 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5384 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5385 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5386 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5387 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5388 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5389 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5390 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5391 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5392 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5393 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5394 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5395 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5396 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5397
5398 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5399 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5400 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5401 rb_define_method(rb_cThread, "value", thread_value, 0);
5402 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5403 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5404 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5405 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5406 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5407 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5408 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5409 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5410 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5411 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5412 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5413 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5414 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5415 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5416 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5417 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5418 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5419 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5420 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5421 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5422 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5423 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5424 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5425 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5426 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5427 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5428
5429 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5430 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5431 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5432 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5433 rb_define_alias(rb_cThread, "inspect", "to_s");
5434
5435 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5436 "stream closed in another thread");
5437
5438 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5439 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5440 rb_define_method(cThGroup, "list", thgroup_list, 0);
5441 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5442 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5443 rb_define_method(cThGroup, "add", thgroup_add, 1);
5444
5445 {
5446 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5447 rb_define_const(cThGroup, "Default", th->thgroup);
5448 }
5449
5450 recursive_key = rb_intern_const("__recursive_key__");
5451
5452 /* init thread core */
5453 {
5454 /* main thread setting */
5455 {
5456 /* acquire global vm lock */
5457#ifdef HAVE_PTHREAD_NP_H
5458 VM_ASSERT(TH_SCHED(th)->running == th);
5459#endif
5460 // thread_sched_to_running() should not be called because
5461 // it assumes blocked by thread_sched_to_waiting().
5462 // thread_sched_to_running(sched, th);
5463
5464 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5465 th->pending_interrupt_queue_checked = 0;
5466 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5467 }
5468 }
5469
5470 rb_thread_create_timer_thread();
5471
5472 Init_thread_sync();
5473
5474 // TODO: Suppress unused function warning for now
5475 // if (0) rb_thread_sched_destroy(NULL);
5476}
5477
5478int
5479ruby_native_thread_p(void)
5480{
5481 rb_thread_t *th = ruby_thread_from_native();
5482
5483 return th != 0;
5484}
5485
5486#ifdef NON_SCALAR_THREAD_ID
5487 #define thread_id_str(th) (NULL)
5488#else
5489 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5490#endif
5491
5492static void
5493debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5494{
5495 rb_thread_t *th = 0;
5496 VALUE sep = rb_str_new_cstr("\n ");
5497
5498 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5499 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5500 (void *)GET_THREAD(), (void *)r->threads.main);
5501
5502 ccan_list_for_each(&r->threads.set, th, lt_node) {
5503 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5504 "native:%p int:%u",
5505 th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5506
5507 if (th->locking_mutex) {
5508 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5509 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5510 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5511 }
5512
5513 {
5514 struct rb_waiting_list *list = th->join_list;
5515 while (list) {
5516 rb_str_catf(msg, "\n depended by: rb_thread_id:%p", (void *)list->thread);
5517 list = list->next;
5518 }
5519 }
5520 rb_str_catf(msg, "\n ");
5521 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
5522 rb_str_catf(msg, "\n");
5523 }
5524}
5525
5526static void
5527rb_check_deadlock(rb_ractor_t *r)
5528{
5529 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5530
5531#ifdef RUBY_THREAD_PTHREAD_H
5532 if (r->threads.sched.readyq_cnt > 0) return;
5533#endif
5534
5535 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5536 int ltnum = rb_ractor_living_thread_num(r);
5537
5538 if (ltnum > sleeper_num) return;
5539 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5540
5541 int found = 0;
5542 rb_thread_t *th = NULL;
5543
5544 ccan_list_for_each(&r->threads.set, th, lt_node) {
5545 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5546 found = 1;
5547 }
5548 else if (th->locking_mutex) {
5549 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5550 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5551 found = 1;
5552 }
5553 }
5554 if (found)
5555 break;
5556 }
5557
5558 if (!found) {
5559 VALUE argv[2];
5560 argv[0] = rb_eFatal;
5561 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5562 debug_deadlock_check(r, argv[1]);
5563 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5564 rb_threadptr_raise(r->threads.main, 2, argv);
5565 }
5566}
5567
5568// Used for VM memsize reporting. Returns the size of a list of waiting_fd
5569// structs. Defined here because the struct definition lives here as well.
5570size_t
5571rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
5572{
5573 struct waiting_fd *waitfd = 0;
5574 size_t size = 0;
5575
5576 ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
5577 size += sizeof(struct waiting_fd);
5578 }
5579
5580 return size;
5581}
5582
5583static void
5584update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5585{
5586 const rb_control_frame_t *cfp = GET_EC()->cfp;
5587 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5588 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5589 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5590 if (lines) {
5591 long line = rb_sourceline() - 1;
5592 long count;
5593 VALUE num;
5594 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5595 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5596 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5597 rb_ary_push(lines, LONG2FIX(line + 1));
5598 return;
5599 }
5600 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5601 return;
5602 }
5603 num = RARRAY_AREF(lines, line);
5604 if (!FIXNUM_P(num)) return;
5605 count = FIX2LONG(num) + 1;
5606 if (POSFIXABLE(count)) {
5607 RARRAY_ASET(lines, line, LONG2FIX(count));
5608 }
5609 }
5610 }
5611}
5612
5613static void
5614update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5615{
5616 const rb_control_frame_t *cfp = GET_EC()->cfp;
5617 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5618 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5619 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5620 if (branches) {
5621 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5622 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5623 VALUE counters = RARRAY_AREF(branches, 1);
5624 VALUE num = RARRAY_AREF(counters, idx);
5625 count = FIX2LONG(num) + 1;
5626 if (POSFIXABLE(count)) {
5627 RARRAY_ASET(counters, idx, LONG2FIX(count));
5628 }
5629 }
5630 }
5631}
5632
5633const rb_method_entry_t *
5634rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5635{
5636 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5637
5638 if (!me->def) return NULL; // negative cme
5639
5640 retry:
5641 switch (me->def->type) {
5642 case VM_METHOD_TYPE_ISEQ: {
5643 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5644 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5645 path = rb_iseq_path(iseq);
5646 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5647 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5648 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5649 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5650 break;
5651 }
5652 case VM_METHOD_TYPE_BMETHOD: {
5653 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5654 if (iseq) {
5655 rb_iseq_location_t *loc;
5656 rb_iseq_check(iseq);
5657 path = rb_iseq_path(iseq);
5658 loc = &ISEQ_BODY(iseq)->location;
5659 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5660 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5661 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5662 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5663 break;
5664 }
5665 return NULL;
5666 }
5667 case VM_METHOD_TYPE_ALIAS:
5668 me = me->def->body.alias.original_me;
5669 goto retry;
5670 case VM_METHOD_TYPE_REFINED:
5671 me = me->def->body.refined.orig_me;
5672 if (!me) return NULL;
5673 goto retry;
5674 default:
5675 return NULL;
5676 }
5677
5678 /* found */
5679 if (RB_TYPE_P(path, T_ARRAY)) {
5680 path = rb_ary_entry(path, 1);
5681 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
5682 }
5683 if (resolved_location) {
5684 resolved_location[0] = path;
5685 resolved_location[1] = beg_pos_lineno;
5686 resolved_location[2] = beg_pos_column;
5687 resolved_location[3] = end_pos_lineno;
5688 resolved_location[4] = end_pos_column;
5689 }
5690 return me;
5691}
5692
5693static void
5694update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5695{
5696 const rb_control_frame_t *cfp = GET_EC()->cfp;
5697 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5698 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5699 VALUE rcount;
5700 long count;
5701
5702 me = rb_resolve_me_location(me, 0);
5703 if (!me) return;
5704
5705 rcount = rb_hash_aref(me2counter, (VALUE) me);
5706 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5707 if (POSFIXABLE(count)) {
5708 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5709 }
5710}
5711
5712VALUE
5713rb_get_coverages(void)
5714{
5715 return GET_VM()->coverages;
5716}
5717
5718int
5719rb_get_coverage_mode(void)
5720{
5721 return GET_VM()->coverage_mode;
5722}
5723
5724void
5725rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5726{
5727 GET_VM()->coverages = coverages;
5728 GET_VM()->me2counter = me2counter;
5729 GET_VM()->coverage_mode = mode;
5730}
5731
5732void
5733rb_resume_coverages(void)
5734{
5735 int mode = GET_VM()->coverage_mode;
5736 VALUE me2counter = GET_VM()->me2counter;
5737 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5738 if (mode & COVERAGE_TARGET_BRANCHES) {
5739 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5740 }
5741 if (mode & COVERAGE_TARGET_METHODS) {
5742 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5743 }
5744}
5745
5746void
5747rb_suspend_coverages(void)
5748{
5749 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5750 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5751 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5752 }
5753 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5754 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5755 }
5756}
5757
5758/* Make coverage arrays empty so old covered files are no longer tracked. */
5759void
5760rb_reset_coverages(void)
5761{
5762 rb_clear_coverages();
5763 rb_iseq_remove_coverage_all();
5764 GET_VM()->coverages = Qfalse;
5765}
5766
5767VALUE
5768rb_default_coverage(int n)
5769{
5770 VALUE coverage = rb_ary_hidden_new_fill(3);
5771 VALUE lines = Qfalse, branches = Qfalse;
5772 int mode = GET_VM()->coverage_mode;
5773
5774 if (mode & COVERAGE_TARGET_LINES) {
5775 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
5776 }
5777 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5778
5779 if (mode & COVERAGE_TARGET_BRANCHES) {
5780 branches = rb_ary_hidden_new_fill(2);
5781 /* internal data structures for branch coverage:
5782 *
5783 * { branch base node =>
5784 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
5785 * branch target id =>
5786 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
5787 * ...
5788 * }],
5789 * ...
5790 * }
5791 *
5792 * Example:
5793 * { NODE_CASE =>
5794 * [1, 0, 4, 3, {
5795 * NODE_WHEN => [2, 8, 2, 9, 0],
5796 * NODE_WHEN => [3, 8, 3, 9, 1],
5797 * ...
5798 * }],
5799 * ...
5800 * }
5801 */
5802 VALUE structure = rb_hash_new();
5803 rb_obj_hide(structure);
5804 RARRAY_ASET(branches, 0, structure);
5805 /* branch execution counters */
5806 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
5807 }
5808 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5809
5810 return coverage;
5811}
5812
5813static VALUE
5814uninterruptible_exit(VALUE v)
5815{
5816 rb_thread_t *cur_th = GET_THREAD();
5817 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
5818
5819 cur_th->pending_interrupt_queue_checked = 0;
5820 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
5821 RUBY_VM_SET_INTERRUPT(cur_th->ec);
5822 }
5823 return Qnil;
5824}
5825
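/*
 * Runs b_proc(data) with pending-interrupt handling deferred, as if the
 * block were wrapped in Thread.handle_interrupt(Object => :never), then
 * re-checks interrupts once the block has returned.
 */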
5826VALUE
5827rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
5828{
5829 VALUE interrupt_mask = rb_ident_hash_new();
5830 rb_thread_t *cur_th = GET_THREAD();
5831
5832 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5833 OBJ_FREEZE_RAW(interrupt_mask);
5834 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5835
5836 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
5837
5838 RUBY_VM_CHECK_INTS(cur_th->ec);
5839 return ret;
5840}
5841
5842static void
5843thread_specific_storage_alloc(rb_thread_t *th)
5844{
5845 VM_ASSERT(th->specific_storage == NULL);
5846
5847 if (UNLIKELY(specific_key_count > 0)) {
5848 th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5849 }
5850}
5851
5852rb_internal_thread_specific_key_t
5853rb_internal_thread_specific_key_create(void)
5854{
5855 rb_vm_t *vm = GET_VM();
5856
5857 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
5858 rb_raise(rb_eThreadError, "The first rb_internal_thread_specific_key_create() is called with multiple ractors");
5859 }
5860 else if (specific_key_count > RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
5861 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5862 }
5863 else {
5864 rb_internal_thread_specific_key_t key = specific_key_count++;
5865
5866 if (key == 0) {
5867 // allocate
5868 rb_ractor_t *cr = GET_RACTOR();
5869 rb_thread_t *th;
5870
5871 ccan_list_for_each(&cr->threads.set, th, lt_node) {
5872 thread_specific_storage_alloc(th);
5873 }
5874 }
5875 return key;
5876 }
5877}
5878
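/*
 * A minimal usage sketch for this internal API (illustrative only;
 * "thread_val" is a Thread VALUE and "p" an arbitrary pointer):
 *
 *   static rb_internal_thread_specific_key_t key;
 *   key = rb_internal_thread_specific_key_create();
 *   rb_internal_thread_specific_set(thread_val, key, p);
 *   void *q = rb_internal_thread_specific_get(thread_val, key);
 */
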
5879// async and native thread safe.
5880void *
5881rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
5882{
5883 rb_thread_t *th = DATA_PTR(thread_val);
5884
5885 VM_ASSERT(rb_thread_ptr(thread_val) == th);
5886 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5887 VM_ASSERT(th->specific_storage);
5888
5889 return th->specific_storage[key];
5890}
5891
5892// async and native thread safe.
5893void
5894rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
5895{
5896 rb_thread_t *th = DATA_PTR(thread_val);
5897
5898 VM_ASSERT(rb_thread_ptr(thread_val) == th);
5899 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5900 VM_ASSERT(th->specific_storage);
5901
5902 th->specific_storage[key] = data;
5903}
#define RUBY_ASSERT_ALWAYS(expr)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
Definition assert.h:167
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
#define RUBY_INTERNAL_EVENT_SWITCH
Thread switched.
Definition event.h:90
int rb_remove_event_hook(rb_event_hook_func_t func)
Removes the passed function from the list of event hooks.
Definition vm_trace.c:315
#define RUBY_EVENT_THREAD_BEGIN
Encountered a new thread.
Definition event.h:57
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Type of event hooks.
Definition event.h:120
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
Definition event.h:41
#define RUBY_EVENT_THREAD_END
Encountered an end of a thread.
Definition event.h:58
static void RB_FL_SET_RAW(VALUE obj, VALUE flags)
This is an implementation detail of RB_FL_SET().
Definition fl_type.h:606
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition class.c:970
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
Definition class.c:2332
ID rb_frame_last_func(void)
Returns the ID of the last method in the call stack.
Definition eval.c:1096
int rb_keyword_given_p(void)
Determines if the current method is given a keyword argument.
Definition eval.c:879
int rb_block_given_p(void)
Determines if the current method is given a block.
Definition eval.c:866
#define rb_str_new2
Old name of rb_str_new_cstr.
Definition string.h:1675
#define ALLOC
Old name of RB_ALLOC.
Definition memory.h:394
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define OBJ_FROZEN
Old name of RB_OBJ_FROZEN.
Definition fl_type.h:137
#define xrealloc
Old name of ruby_xrealloc.
Definition xmalloc.h:56
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define OBJ_FREEZE_RAW
Old name of RB_OBJ_FREEZE_RAW.
Definition fl_type.h:136
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
Definition assume.h:29
#define CLASS_OF
Old name of rb_class_of.
Definition globals.h:203
#define xmalloc
Old name of ruby_xmalloc.
Definition xmalloc.h:53
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FIX2INT
Old name of RB_FIX2INT.
Definition int.h:41
#define ZALLOC_N
Old name of RB_ZALLOC_N.
Definition memory.h:395
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition value_type.h:66
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define Qtrue
Old name of RUBY_Qtrue.
#define NUM2INT
Old name of RB_NUM2INT.
Definition int.h:44
#define INT2NUM
Old name of RB_INT2NUM.
Definition int.h:43
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
Definition long.h:46
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define POSFIXABLE
Old name of RB_POSFIXABLE.
Definition fixnum.h:29
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
Definition eval.c:296
#define ruby_debug
This variable controls whether the interpreter is in debug mode.
Definition error.h:482
VALUE rb_eSystemExit
SystemExit exception.
Definition error.c:1337
VALUE rb_eIOError
IOError exception.
Definition io.c:178
VALUE rb_eStandardError
StandardError exception.
Definition error.c:1341
VALUE rb_eTypeError
TypeError exception.
Definition error.c:1344
void rb_frozen_error_raise(VALUE frozen_obj, const char *fmt,...)
Raises an instance of rb_eFrozenError.
Definition error.c:3779
VALUE rb_eFatal
fatal exception.
Definition error.c:1340
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1342
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports unless $VERBOSE is nil.
Definition error.c:423
VALUE rb_exc_new(VALUE etype, const char *ptr, long len)
Creates an instance of the passed exception class.
Definition error.c:1382
VALUE rb_eException
Mother of all exceptions.
Definition error.c:1336
VALUE rb_eThreadError
ThreadError exception.
Definition eval.c:884
void rb_exit(int status)
Terminates the current execution context.
Definition process.c:4454
VALUE rb_eSignal
SignalException exception.
Definition error.c:1339
VALUE rb_obj_alloc(VALUE klass)
Allocates an instance of the given class.
Definition object.c:2058
VALUE rb_cInteger
Integer class.
Definition numeric.c:198
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
Definition object.c:215
VALUE rb_cThread
Thread class.
Definition vm.c:524
VALUE rb_cModule
Module class.
Definition object.c:65
double rb_num2dbl(VALUE num)
Converts an instance of rb_cNumeric into C's double.
Definition object.c:3639
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance of the given class or of one of its descendants.
Definition object.c:830
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
Definition error.h:280
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
Definition proc.c:808
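The entries rb_check_arity(), rb_block_given_p() and rb_block_proc() typically appear together in a variadic (arity -1) method. A minimal sketch; the method name and its behaviour are hypothetical:

    #include "ruby/ruby.h"

    /* Ruby signature: obj.fetch(key, default = nil) { |key| ... } */
    static VALUE
    my_fetch(int argc, VALUE *argv, VALUE self)
    {
        rb_check_arity(argc, 1, 2);           /* ArgumentError outside 1..2 */
        if (rb_block_given_p()) {
            VALUE handler = rb_block_proc();  /* reify the implicit block */
            return rb_funcall(handler, rb_intern("call"), 1, argv[0]);
        }
        return argc == 2 ? argv[1] : Qnil;
    }
    /* registered with: rb_define_method(klass, "fetch", my_fetch, -1); */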
void rb_reset_random_seed(void)
Resets the RNG behind rb_genrand_int32()/rb_genrand_real().
Definition random.c:1782
VALUE rb_str_concat(VALUE dst, VALUE src)
Identical to rb_str_append(), except it also accepts an integer as a codepoint.
Definition string.c:3500
#define rb_str_cat_cstr(buf, str)
Identical to rb_str_cat(), except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1656
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1514
int rb_thread_interrupted(VALUE thval)
Checks if the thread's execution was recently interrupted.
Definition thread.c:1435
VALUE rb_thread_local_aref(VALUE thread, ID key)
This badly named function reads from the Fiber-local storage.
Definition thread.c:3516
VALUE rb_mutex_new(void)
Creates a mutex.
VALUE rb_thread_kill(VALUE thread)
Terminates the given thread.
Definition thread.c:2709
#define RUBY_UBF_IO
A special UBF for blocking IO operations.
Definition thread.h:382
VALUE rb_thread_main(void)
Obtains the "main" thread.
Definition thread.c:2948
VALUE rb_exec_recursive(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
"Recursion" API entry point.
Definition thread.c:5266
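A minimal sketch of the recursion API: the callback receives the object, the pass-through argument, and a flag that is non-zero when the same object is re-entered, which is how cycle-safe inspection is commonly written:

    static VALUE
    inspect_body(VALUE obj, VALUE arg, int recursive)
    {
        if (recursive) return rb_str_new_cstr("[...]");  /* cycle detected */
        return rb_inspect(obj);
    }

    static VALUE
    safe_inspect(VALUE obj)
    {
        return rb_exec_recursive(inspect_body, obj, Qnil);
    }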
void rb_thread_sleep_forever(void)
Blocks indefinitely.
Definition thread.c:1371
void rb_thread_fd_close(int fd)
Notifies other threads that a file descriptor is being closed.
Definition thread.c:2650
void rb_thread_wait_for(struct timeval time)
Identical to rb_thread_sleep(), except it takes struct timeval instead.
Definition thread.c:1403
VALUE rb_thread_stop(void)
Stops the current thread.
Definition thread.c:2860
VALUE rb_mutex_sleep(VALUE self, VALUE timeout)
Releases the lock held in the mutex and waits for the period of time; reacquires the lock on wakeup.
VALUE rb_exec_recursive_paired(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive(), except it checks for the recursion on the ordered pair of { g, p }.
Definition thread.c:5277
void rb_unblock_function_t(void *)
This is the type of UBFs.
Definition thread.h:336
void rb_thread_atfork_before_exec(void)
FIXME: the situation of this function is unclear.
Definition thread.c:4741
void rb_thread_check_ints(void)
Checks for interrupts.
Definition thread.c:1418
VALUE rb_thread_run(VALUE thread)
This is a rb_thread_wakeup() + rb_thread_schedule() combo.
Definition thread.c:2851
VALUE rb_thread_wakeup(VALUE thread)
Marks a given thread as eligible for scheduling.
Definition thread.c:2804
VALUE rb_mutex_unlock(VALUE mutex)
Releases the mutex.
VALUE rb_exec_recursive_paired_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive_outer(), except it checks for the recursion on the ordered pair of { g, p }.
Definition thread.c:5307
void rb_thread_sleep_deadly(void)
Identical to rb_thread_sleep_forever(), except the thread calling this function is considered "dead" when the deadlock checker is triggered.
Definition thread.c:1378
void rb_thread_atfork(void)
A pthread_atfork(3posix)-like API.
Definition thread.c:4736
VALUE rb_thread_current(void)
Obtains the "current" thread.
Definition thread.c:2927
int rb_thread_alone(void)
Checks if the calling thread is the only thread that is currently alive.
Definition thread.c:3788
VALUE rb_thread_local_aset(VALUE thread, ID key, VALUE val)
This badly named function writes to the Fiber-local storage.
Definition thread.c:3664
void rb_thread_schedule(void)
Tries to switch to another thread.
Definition thread.c:1466
#define RUBY_UBF_PROCESS
A special UBF for blocking process operations.
Definition thread.h:389
VALUE rb_exec_recursive_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
Identical to rb_exec_recursive(), except it calls f for outermost recursion only.
Definition thread.c:5289
VALUE rb_thread_wakeup_alive(VALUE thread)
Identical to rb_thread_wakeup(), except it doesn't raise on an already killed thread.
Definition thread.c:2813
VALUE rb_mutex_lock(VALUE mutex)
Attempts to lock the mutex.
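The mutex entries compose naturally with rb_ensure() (listed below) so the lock is released even when the protected code raises. A minimal sketch; critical_section is a hypothetical callback:

    static VALUE
    critical_section(VALUE data)
    {
        /* ... code that must not run concurrently ... */
        return Qnil;
    }

    static VALUE
    with_lock(VALUE mutex)    /* mutex obtained from rb_mutex_new() */
    {
        rb_mutex_lock(mutex);
        /* rb_mutex_unlock already has the VALUE (*)(VALUE) shape rb_ensure wants */
        return rb_ensure(critical_section, Qnil, rb_mutex_unlock, mutex);
    }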
void rb_thread_sleep(int sec)
Blocks for the given period of time.
Definition thread.c:1441
void rb_timespec_now(struct timespec *ts)
Fills the current time into the given struct.
Definition time.c:1943
struct timeval rb_time_timeval(VALUE time)
Converts an instance of rb_cTime to a struct timeval that represents the identical point of time.
Definition time.c:2881
VALUE rb_ivar_set(VALUE obj, ID name, VALUE val)
Identical to rb_iv_set(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1854
VALUE rb_ivar_get(VALUE obj, ID name)
Identical to rb_iv_get(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1340
VALUE rb_class_path(VALUE mod)
Identical to rb_mod_name(), except it returns #<Class: ...> style inspection for anonymous modules.
Definition variable.c:283
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
int rb_sourceline(void)
Resembles __LINE__.
Definition vm.c:1844
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
Definition symbol.h:276
ID rb_check_id(volatile VALUE *namep)
Detects if the given name is already interned or not.
Definition symbol.c:1095
VALUE rb_to_symbol(VALUE name)
Identical to rb_intern_str(), except it generates a dynamic symbol if necessary.
Definition string.c:12042
ID rb_to_id(VALUE str)
Converts the given name (a String or a Symbol) into an ID.
Definition string.c:12032
void rb_define_const(VALUE klass, const char *name, VALUE val)
Defines a Ruby level constant under a namespace.
Definition variable.c:3690
VALUE rb_eIOTimeoutError
Indicates that a timeout has occurred while performing an IO operation.
Definition io.c:179
#define RB_NOGVL_UBF_ASYNC_SAFE
Passing this flag to rb_nogvl() indicates that the passed UBF is async-signal-safe.
Definition thread.h:60
void * rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
Gets thread- and tool-specific data.
Definition thread.c:5880
#define RB_NOGVL_INTR_FAIL
Passing this flag to rb_nogvl() prevents it from checking interrupts.
Definition thread.h:48
void rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
Sets thread- and tool-specific data.
Definition thread.c:5893
rb_internal_thread_specific_key_t rb_internal_thread_specific_key_create(void)
Creates a key to store thread-specific data.
Definition thread.c:5852
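A minimal sketch of the thread-specific-storage trio, e.g. for a profiling tool keeping a per-thread counter: the key is created once per process, then tool-owned native memory can be attached to any thread. The counter itself is hypothetical:

    #include <stdlib.h>
    #include "ruby/thread.h"

    static rb_internal_thread_specific_key_t counter_key;  /* create once */

    static void
    tool_init(void)
    {
        counter_key = rb_internal_thread_specific_key_create();
    }

    static void
    tool_bump(VALUE thread)
    {
        long *n = rb_internal_thread_specific_get(thread, counter_key);
        if (n == NULL) {
            n = calloc(1, sizeof(*n));      /* tool-owned, freed by the tool */
            rb_internal_thread_specific_set(thread, counter_key, n);
        }
        (*n)++;
    }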
void * rb_nogvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int flags)
Identical to rb_thread_call_without_gvl(), except it additionally takes "flags" that change its behaviour.
Definition thread.c:1523
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
(Re-)acquires the GVL.
Definition thread.c:1837
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Identical to rb_thread_call_without_gvl(), except it does not interface with signals etc.
Definition thread.c:1651
void * rb_thread_call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Allows the passed function to run in parallel with other Ruby threads.
Definition thread.c:1658
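A minimal sketch of the GVL-release pattern these entries describe: the callback runs with the GVL released (so it must not touch Ruby objects), and RUBY_UBF_IO tells the VM how to interrupt it; rb_nogvl() is the same call with extra flags such as RB_NOGVL_UBF_ASYNC_SAFE. The wrapper name is hypothetical:

    #include <unistd.h>
    #include "ruby/thread.h"

    struct read_args { int fd; void *buf; size_t len; ssize_t result; };

    static void *
    blocking_read(void *p)              /* runs without the GVL */
    {
        struct read_args *a = p;
        a->result = read(a->fd, a->buf, a->len);
        return NULL;
    }

    static ssize_t
    read_nogvl(int fd, void *buf, size_t len)
    {
        struct read_args a = { fd, buf, len, -1 };
        rb_thread_call_without_gvl(blocking_read, &a, RUBY_UBF_IO, NULL);
        return a.result;                /* other Ruby threads ran meanwhile */
    }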
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Shim for block function parameters.
Definition iterator.h:58
VALUE rb_yield(VALUE val)
Yields the block.
Definition vm_eval.c:1376
void rb_throw_obj(VALUE tag, VALUE val)
Identical to rb_throw(), except it allows arbitrary Ruby object to become a tag.
Definition vm_eval.c:2254
static int rb_fd_max(const rb_fdset_t *f)
It seems this function has no use.
Definition largesize.h:209
void rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
Destructively overwrites an fdset with another.
void rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
Identical to rb_fd_copy(), except it copies unlimited number of file descriptors.
void rb_fd_term(rb_fdset_t *f)
Destroys the rb_fdset_t, releasing any memory and resources it used.
static fd_set * rb_fd_ptr(const rb_fdset_t *f)
Raw pointer to fd_set.
Definition largesize.h:195
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
Definition memory.h:366
#define ALLOCA_N(type, n)
Allocates an array of n elements of the given type on the machine stack, via alloca.
Definition memory.h:286
#define MEMZERO(p, type, n)
Handy macro to erase a region of memory.
Definition memory.h:354
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:161
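A minimal sketch of why RB_GC_GUARD() exists: once the last use of a VALUE local is behind the optimiser, the GC may reclaim the object even though a raw pointer into it is still live. consume_cstr below is hypothetical:

    extern void consume_cstr(const char *s);  /* hypothetical consumer */

    static void
    use_cstr(void)
    {
        VALUE str = rb_str_new_cstr("hello world");
        const char *ptr = StringValueCStr(str);

        consume_cstr(ptr);   /* only sees the raw pointer, not str */

        RB_GC_GUARD(str);    /* keeps str (and thus ptr) alive until here */
    }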
VALUE rb_thread_create(type *q, void *w)
Creates a rb_cThread instance.
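The signature above is Doxygen's genericised rendering; the actual shape is rb_thread_create(VALUE (*fn)(void *), void *arg). A minimal sketch; the worker names are hypothetical:

    static VALUE
    worker_body(void *arg)      /* runs in the new Ruby thread, GVL held */
    {
        int *job_id = arg;
        return INT2NUM(*job_id);     /* becomes the thread's #value */
    }

    static VALUE
    spawn_worker(int *job_id)
    {
        return rb_thread_create(worker_body, job_id);
    }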
void rb_hash_foreach(VALUE q, int_type *w, VALUE e)
Iterates over the given hash.
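The rb_hash_foreach() callback returns an st.h status (ST_CONTINUE, ST_STOP, ...) to steer the iteration. A minimal sketch collecting key inspections into a string; the helper names are hypothetical:

    #include "ruby/ruby.h"
    #include "ruby/st.h"        /* ST_CONTINUE */

    static int
    collect_keys_i(VALUE key, VALUE val, VALUE acc)
    {
        rb_str_concat(rb_str_cat_cstr(acc, " "), rb_inspect(key));
        return ST_CONTINUE;     /* ST_STOP would end the walk early */
    }

    static VALUE
    collect_keys(VALUE hash)
    {
        VALUE acc = rb_str_new_cstr("keys:");
        rb_hash_foreach(hash, collect_keys_i, acc);
        return acc;
    }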
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
A C-level equivalent of Ruby's ensure clause.
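A minimal sketch of rb_ensure() as the C spelling of begin/ensure: the second callback runs whether or not the first one raises. The IO read-and-close pairing is hypothetical:

    static VALUE
    read_body(VALUE io)
    {
        return rb_funcall(io, rb_intern("read"), 0);
    }

    static VALUE
    close_io(VALUE io)
    {
        return rb_funcall(io, rb_intern("close"), 0);
    }

    static VALUE
    read_and_close(VALUE io)
    {
        return rb_ensure(read_body, io, close_io, io);  /* close always runs */
    }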
#define rb_fd_isset
Queries if the given fd is in the rb_fdset_t.
Definition posix.h:60
#define rb_fd_select
Waits for multiple file descriptors at once.
Definition posix.h:66
#define rb_fd_init
Initialises the given rb_fdset_t.
Definition posix.h:63
#define rb_fd_set
Sets the given fd to the rb_fdset_t.
Definition posix.h:54
#define rb_fd_zero
Clears the given rb_fdset_t.
Definition posix.h:51
#define rb_fd_clr
Unsets the given fd from the rb_fdset_t.
Definition posix.h:57
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
static int RARRAY_LENINT(VALUE ary)
Identical to rb_array_len(), except its return type is int.
Definition rarray.h:281
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
Definition rarray.h:386
#define RARRAY_AREF(a, i)
Queries the object at index i of the array a.
Definition rarray.h:403
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
Definition rarray.h:52
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
Definition rbasic.h:152
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
#define RCLASS_SUPER
Just another name of rb_class_get_superclass.
Definition rclass.h:44
#define DATA_PTR(obj)
Convenient getter macro.
Definition rdata.h:71
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
Definition rhash.h:79
#define StringValueCStr(v)
Identical to StringValuePtr, except it additionally checks the contents for viability as a C string.
Definition rstring.h:89
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
Definition rtypeddata.h:79
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
Definition rtypeddata.h:515
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
Definition rtypeddata.h:449
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of taking an existing one.
Definition rtypeddata.h:497
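A minimal sketch tying the TypedData macros together with rb_define_alloc_func() (listed earlier); the point struct and its type registration are hypothetical:

    #include "ruby/ruby.h"

    struct point { double x, y; };

    static const rb_data_type_t point_type = {
        .wrap_struct_name = "point",
        .function = {
            .dfree = RUBY_TYPED_DEFAULT_FREE,   /* plain ruby_xfree() */
        },
        .flags = RUBY_TYPED_FREE_IMMEDIATELY,
    };

    static VALUE
    point_alloc(VALUE klass)    /* install with rb_define_alloc_func() */
    {
        struct point *p;
        return TypedData_Make_Struct(klass, struct point, &point_type, p);
    }

    static VALUE
    point_x(VALUE self)
    {
        struct point *p;
        TypedData_Get_Struct(self, struct point, &point_type, p);
        return DBL2NUM(p->x);
    }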
#define errno
Ractor-aware version of errno.
Definition ruby.h:388
int ruby_native_thread_p(void)
Queries if the thread calling this function is a Ruby thread.
Definition thread.c:5478
#define RB_PASS_CALLED_KEYWORDS
Passes keywords if the current method was called with keywords; useful for argument delegation.
Definition scan_args.h:78
Scheduler APIs.
VALUE rb_fiber_scheduler_current(void)
Identical to rb_fiber_scheduler_get(), except it also returns RUBY_Qnil in case of a blocking fiber.
Definition scheduler.c:219
VALUE rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
Non-blocking wait for the passed "blocker", which is for instance Thread#join or Mutex#lock.
Definition scheduler.c:383
VALUE rb_fiber_scheduler_set(VALUE scheduler)
Destructively assigns the passed scheduler to that of the current thread that is calling this function.
Definition scheduler.c:180
VALUE rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
Wakes up a fiber previously blocked using rb_fiber_scheduler_block().
Definition scheduler.c:402
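A minimal sketch of the scheduler-aware blocking pattern these entries describe, roughly what a thread does before parking itself: defer to the fiber scheduler when one is installed, otherwise block natively. The wrapper name is hypothetical:

    #include "ruby/fiber/scheduler.h"

    static void
    wait_on(VALUE blocker)
    {
        VALUE scheduler = rb_fiber_scheduler_current();
        if (scheduler != Qnil) {
            /* Qnil timeout: wait until rb_fiber_scheduler_unblock()
               is called for this fiber. */
            rb_fiber_scheduler_block(scheduler, blocker, Qnil);
        }
        else {
            rb_thread_sleep_forever();   /* native blocking fallback */
        }
    }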
int rb_thread_fd_select(int nfds, rb_fdset_t *rfds, rb_fdset_t *wfds, rb_fdset_t *efds, struct timeval *timeout)
Waits for multiple file descriptors at once.
Definition thread.c:4275
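A minimal sketch combining the rb_fd_* macros above with rb_thread_fd_select(); unlike raw select(2), this keeps other Ruby threads runnable and is not limited to FD_SETSIZE. The wrapper name is hypothetical:

    static int
    wait_readable(int fd)
    {
        rb_fdset_t rfds;
        int ret;

        rb_fd_init(&rfds);
        rb_fd_set(fd, &rfds);
        /* NULL timeout blocks until ready or interrupted */
        ret = rb_thread_fd_select(fd + 1, &rfds, NULL, NULL, NULL);
        rb_fd_term(&rfds);
        return ret;    /* >0 ready, 0 timed out, -1 error (errno set) */
    }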
#define rb_fd_resize(n, f)
Does nothing (defined for compatibility).
Definition select.h:43
static bool RB_TEST(VALUE obj)
Emulates Ruby's "if" statement.
#define RTEST
This is an old name of RB_TEST.
#define _(args)
This was a transition path from K&R to ANSI.
Definition stdarg.h:35
This is the struct that holds the necessary info for a TypedData object.
Definition rtypeddata.h:200
The data structure which wraps the fd_set bitmap used by select(2).
Definition largesize.h:71
int maxfd
Maximum allowed number of FDs.
Definition largesize.h:72
fd_set * fdset
File descriptors buffer.
Definition largesize.h:73
int capa
Allocated capacity of the fdset buffer, counted in FDs.
Definition win32.h:50
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition method.h:135
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Blocks until the current thread obtains a lock.
Definition thread.c:298
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Releases a lock.
Definition thread.c:304
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_destroy.
void rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
Fills the passed lock with an initial value.
Definition thread.c:286
void rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
Destroys the passed mutex.
Definition thread.c:292
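A minimal sketch of the native-lock API: unlike rb_mutex_*, these wrap the raw OS lock (typically a pthread mutex), do not release the GVL, and know nothing about the fiber scheduler, so they should only guard short, non-blocking critical sections. The stats counter is hypothetical:

    #include "ruby/thread_native.h"

    static rb_nativethread_lock_t stats_lock;   /* initialise once */
    static long stats_counter;

    static void
    stats_init(void)
    {
        rb_nativethread_lock_initialize(&stats_lock);
    }

    static void
    stats_bump(void)
    {
        rb_nativethread_lock_lock(&stats_lock);
        stats_counter++;                 /* do not call Ruby APIs here */
        rb_nativethread_lock_unlock(&stats_lock);
    }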
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40