Ruby 3.3.6p108 (2024-11-05 revision 75015d4c1f6965b5e85e96fb309f1f2129f933c0)
thread_win32.c
/* -*-c-*- */
/**********************************************************************

  thread_win32.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include <process.h>

#define TIME_QUANTUM_USEC (10 * 1000)
#define RB_CONDATTR_CLOCK_MONOTONIC 1 /* no effect */

#undef Sleep

#define native_thread_yield() Sleep(0)
#define unregister_ubf_list(th)
#define ubf_wakeup_all_threads() do {} while (0)
#define ubf_threads_empty() (1)
#define ubf_timer_disarm() do {} while (0)
#define ubf_list_atfork() do {} while (0)

static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;

static int w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th);

rb_internal_thread_event_hook_t *
rb_internal_thread_add_event_hook(rb_internal_thread_event_callback callback, rb_event_flag_t internal_event, void *user_data)
{
    // not implemented
    return NULL;
}

bool
rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t * hook)
{
    // not implemented
    return false;
}

RBIMPL_ATTR_NORETURN()
static void
w32_error(const char *func)
{
    LPVOID lpMsgBuf;
    DWORD err = GetLastError();
    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL,
                      err,
                      MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
                      (LPTSTR) & lpMsgBuf, 0, NULL) == 0)
        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL,
                      err,
                      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                      (LPTSTR) & lpMsgBuf, 0, NULL);
    rb_bug("%s: %s", func, (char*)lpMsgBuf);
    UNREACHABLE;
}

#define W32_EVENT_DEBUG 0

#if W32_EVENT_DEBUG
#define w32_event_debug printf
#else
#define w32_event_debug if (0) printf
#endif

static int
w32_mutex_lock(HANDLE lock, bool try)
{
    DWORD result;
    while (1) {
        // RUBY_DEBUG_LOG() is not available because RUBY_DEBUG_LOG() calls it.
        w32_event_debug("lock:%p\n", lock);

        result = w32_wait_events(&lock, 1, try ? 0 : INFINITE, 0);
        switch (result) {
          case WAIT_OBJECT_0:
            /* get mutex object */
            w32_event_debug("locked lock:%p\n", lock);
            return 0;

          case WAIT_OBJECT_0 + 1:
            /* interrupt */
            errno = EINTR;
            w32_event_debug("interrupted lock:%p\n", lock);
            return 0;

          case WAIT_TIMEOUT:
            w32_event_debug("timeout lock:%p\n", lock);
            return EBUSY;

          case WAIT_ABANDONED:
            rb_bug("win32_mutex_lock: WAIT_ABANDONED");
            break;

          default:
            rb_bug("win32_mutex_lock: unknown result (%ld)", result);
            break;
        }
    }
    return 0;
}
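
/* Usage sketch for w32_mutex_lock().  With try=true the wait degrades to a
 * zero-timeout poll, so a held mutex yields EBUSY instead of blocking.  The
 * example_try_lock() helper below is hypothetical, for illustration only. */
#if 0
static void
example_try_lock(HANDLE lock)
{
    if (w32_mutex_lock(lock, true) == EBUSY) {
        /* the mutex is held elsewhere; do other work instead of blocking */
    }
    else {
        /* ... critical section ... */
        ReleaseMutex(lock);
    }
}
#endif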

static HANDLE
w32_mutex_create(void)
{
    HANDLE lock = CreateMutex(NULL, FALSE, NULL);
    if (lock == NULL) {
        w32_error("rb_native_mutex_initialize");
    }
    return lock;
}

#define GVL_DEBUG 0

static void
thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
{
    w32_mutex_lock(sched->lock, false);
    if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
}

#define thread_sched_to_dead thread_sched_to_waiting

static void
thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
{
    ReleaseMutex(sched->lock);
}

static void
thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
{
    thread_sched_to_waiting(sched, th);
    native_thread_yield();
    thread_sched_to_running(sched, th);
}

void
rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
{
    if (GVL_DEBUG) fprintf(stderr, "sched init\n");
    sched->lock = w32_mutex_create();
}

#if 0
// per-ractor
void
rb_thread_sched_destroy(struct rb_thread_sched *sched)
{
    if (GVL_DEBUG) fprintf(stderr, "sched destroy\n");
    CloseHandle(sched->lock);
}
#endif
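
/* The sched->lock created above is the Windows incarnation of the GVL: a
 * plain Win32 mutex handed off around blocking operations.  A minimal
 * sketch of the hand-off (example_gvl_handoff() is hypothetical; the real
 * BLOCKING_REGION macro also registers an unblock function): */
#if 0
static void
example_gvl_handoff(rb_thread_t *th)
{
    thread_sched_to_waiting(TH_SCHED(th), th);  /* release the GVL */
    /* ... blocking system call runs without the GVL ... */
    thread_sched_to_running(TH_SCHED(th), th);  /* reacquire the GVL */
}
#endif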

rb_thread_t *
ruby_thread_from_native(void)
{
    return TlsGetValue(ruby_native_thread_key);
}

int
ruby_thread_set_native(rb_thread_t *th)
{
    if (th && th->ec) {
        rb_ractor_set_current_ec(th->ractor, th->ec);
    }
    return TlsSetValue(ruby_native_thread_key, th);
}

void
Init_native_thread(rb_thread_t *main_th)
{
    if ((ruby_current_ec_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
        rb_bug("TlsAlloc() for ruby_current_ec_key fails");
    }
    if ((ruby_native_thread_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
        rb_bug("TlsAlloc() for ruby_native_thread_key fails");
    }

    // setup main thread

    ruby_thread_set_native(main_th);
    main_th->nt->interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    DuplicateHandle(GetCurrentProcess(),
                    GetCurrentThread(),
                    GetCurrentProcess(),
                    &main_th->nt->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);

    RUBY_DEBUG_LOG("initial thread th:%u thid:%p, event: %p",
                   rb_th_serial(main_th),
                   main_th->nt->thread_id,
                   main_th->nt->interrupt_event);
}

void
ruby_mn_threads_params(void)
{
}

static int
w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
{
    HANDLE *targets = events;
    HANDLE intr;
    const int initcount = count;
    DWORD ret;

    w32_event_debug("events:%p, count:%d, timeout:%ld, th:%u\n",
                    events, count, timeout, th ? rb_th_serial(th) : UINT_MAX);

    if (th && (intr = th->nt->interrupt_event)) {
        if (ResetEvent(intr) && (!RUBY_VM_INTERRUPTED(th->ec) || SetEvent(intr))) {
            targets = ALLOCA_N(HANDLE, count + 1);
            memcpy(targets, events, sizeof(HANDLE) * count);

            targets[count++] = intr;
            w32_event_debug("handle:%p (count:%d, intr)\n", intr, count);
        }
        else if (intr == th->nt->interrupt_event) {
            w32_error("w32_wait_events");
        }
    }

    w32_event_debug("WaitForMultipleObjects start count:%d\n", count);
    ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
    w32_event_debug("WaitForMultipleObjects end ret:%lu\n", ret);

    if (ret == (DWORD)(WAIT_OBJECT_0 + initcount) && th) {
        errno = EINTR;
    }
    if (ret == WAIT_FAILED && W32_EVENT_DEBUG) {
        int i;
        DWORD dmy;
        for (i = 0; i < count; i++) {
            w32_event_debug("i:%d %s\n", i, GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
        }
    }
    return ret;
}
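
/* Note on the wait above: when th is given, w32_wait_events() appends
 * th->nt->interrupt_event as one extra handle, so every wait doubles as an
 * interruption point; completion on slot WAIT_OBJECT_0 + initcount means
 * "interrupted" (errno = EINTR), not that a caller-supplied handle fired.
 * A hypothetical caller distinguishes the two outcomes like this: */
#if 0
static void
example_interruptible_wait(HANDLE h, rb_thread_t *th)
{
    DWORD ret = w32_wait_events(&h, 1, INFINITE, th);
    if (ret == WAIT_OBJECT_0) {
        /* h itself was signalled */
    }
    else if (ret == WAIT_OBJECT_0 + 1 && errno == EINTR) {
        /* the appended interrupt_event fired */
    }
}
#endif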

static void ubf_handle(void *ptr);
#define ubf_select ubf_handle

int
rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
{
    return w32_wait_events(events, num, timeout, ruby_thread_from_native());
}

int
rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
{
    int ret;
    rb_thread_t *th = GET_THREAD();

    BLOCKING_REGION(th, ret = rb_w32_wait_events_blocking(events, num, timeout),
                    ubf_handle, ruby_thread_from_native(), FALSE);
    return ret;
}

static void
w32_close_handle(HANDLE handle)
{
    if (CloseHandle(handle) == 0) {
        w32_error("w32_close_handle");
    }
}

static void
w32_resume_thread(HANDLE handle)
{
    if (ResumeThread(handle) == (DWORD)-1) {
        w32_error("w32_resume_thread");
    }
}

#ifdef _MSC_VER
#define HAVE__BEGINTHREADEX 1
#else
#undef HAVE__BEGINTHREADEX
#endif

#ifdef HAVE__BEGINTHREADEX
#define start_thread (HANDLE)_beginthreadex
#define thread_errno errno
typedef unsigned long (__stdcall *w32_thread_start_func)(void*);
#else
#define start_thread CreateThread
#define thread_errno rb_w32_map_errno(GetLastError())
typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
#endif

static HANDLE
w32_create_thread(DWORD stack_size, w32_thread_start_func func, void *val)
{
    return start_thread(0, stack_size, func, val, CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 0);
}

int
rb_w32_sleep(unsigned long msec)
{
    return w32_wait_events(0, 0, msec, ruby_thread_from_native());
}

int WINAPI
rb_w32_Sleep(unsigned long msec)
{
    int ret;
    rb_thread_t *th = GET_THREAD();

    BLOCKING_REGION(th, ret = rb_w32_sleep(msec),
                    ubf_handle, ruby_thread_from_native(), FALSE);
    return ret;
}

static DWORD
hrtime2msec(rb_hrtime_t hrt)
{
    return (DWORD)hrt / (DWORD)RB_HRTIME_PER_MSEC;
}

static void
native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
    const volatile DWORD msec = rel ? hrtime2msec(*rel) : INFINITE;

    THREAD_BLOCKING_BEGIN(th);
    {
        DWORD ret;

        rb_native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = ubf_handle;
        th->unblock.arg = th;
        rb_native_mutex_unlock(&th->interrupt_lock);

        if (RUBY_VM_INTERRUPTED(th->ec)) {
            /* interrupted. return immediate */
        }
        else {
            RUBY_DEBUG_LOG("start msec:%lu", msec);
            ret = w32_wait_events(0, 0, msec, th);
            RUBY_DEBUG_LOG("done ret:%lu", ret);
            (void)ret;
        }

        rb_native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = 0;
        th->unblock.arg = 0;
        rb_native_mutex_unlock(&th->interrupt_lock);
    }
    THREAD_BLOCKING_END(th);
}
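
/* native_sleep() above follows the usual unblock-function protocol:
 * publish th->unblock under interrupt_lock, re-check RUBY_VM_INTERRUPTED()
 * to close the race with an interrupt that arrived first, then wait.
 * Waking the sleeper from another thread is a call to the registered
 * function under the same lock; a sketch (example_wakeup() is
 * hypothetical): */
#if 0
static void
example_wakeup(rb_thread_t *sleeping_th)
{
    rb_native_mutex_lock(&sleeping_th->interrupt_lock);
    if (sleeping_th->unblock.func) {
        /* here: ubf_handle(), which SetEvent()s the interrupt_event */
        sleeping_th->unblock.func(sleeping_th->unblock.arg);
    }
    rb_native_mutex_unlock(&sleeping_th->interrupt_lock);
}
#endif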

void
rb_native_mutex_lock(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    w32_mutex_lock(lock->mutex, false);
#else
    EnterCriticalSection(&lock->crit);
#endif
}

int
rb_native_mutex_trylock(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    return w32_mutex_lock(lock->mutex, true);
#else
    return TryEnterCriticalSection(&lock->crit) == 0 ? EBUSY : 0;
#endif
}

void
rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    RUBY_DEBUG_LOG("lock:%p", lock->mutex);
    ReleaseMutex(lock->mutex);
#else
    LeaveCriticalSection(&lock->crit);
#endif
}

void
rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    lock->mutex = w32_mutex_create();
    /* thread_debug("initialize mutex: %p\n", lock->mutex); */
#else
    InitializeCriticalSection(&lock->crit);
#endif
}

void
rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    w32_close_handle(lock->mutex);
#else
    DeleteCriticalSection(&lock->crit);
#endif
}

struct cond_event_entry {
    struct cond_event_entry* next;
    struct cond_event_entry* prev;
    HANDLE event;
};

void
rb_native_cond_signal(rb_nativethread_cond_t *cond)
{
    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    if (e != head) {
        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        prev->next = next;
        next->prev = prev;
        e->next = e->prev = e;

        SetEvent(e->event);
    }
}

void
rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
{
    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    while (e != head) {
        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        SetEvent(e->event);

        prev->next = next;
        next->prev = prev;
        e->next = e->prev = e;

        e = next;
    }
}
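
/* These condition variables are intrusive circular doubly-linked lists
 * whose head is the rb_nativethread_cond_t itself; each waiter enqueues a
 * stack-allocated cond_event_entry carrying a private auto-reset event.
 * With two waiters A and B the list looks like
 *
 *     head <-> A <-> B <-> head     (empty: cond->next == head)
 *
 * signal() unlinks one entry before SetEvent() so a woken waiter cannot be
 * picked twice; broadcast() does the same for every entry in turn. */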

static int
native_cond_timedwait_ms(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
{
    DWORD r;
    struct cond_event_entry entry;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    entry.event = CreateEvent(0, FALSE, FALSE, 0);

    /* cond is guarded by mutex */
    entry.next = head;
    entry.prev = head->prev;
    head->prev->next = &entry;
    head->prev = &entry;

    rb_native_mutex_unlock(mutex);
    {
        r = WaitForSingleObject(entry.event, msec);
        if ((r != WAIT_OBJECT_0) && (r != WAIT_TIMEOUT)) {
            rb_bug("rb_native_cond_wait: WaitForSingleObject returns %lu", r);
        }
    }
    rb_native_mutex_lock(mutex);

    entry.prev->next = entry.next;
    entry.next->prev = entry.prev;

    w32_close_handle(entry.event);
    return (r == WAIT_OBJECT_0) ? 0 : ETIMEDOUT;
}

void
rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
{
    native_cond_timedwait_ms(cond, mutex, INFINITE);
}

static unsigned long
abs_timespec_to_timeout_ms(const struct timespec *ts)
{
    struct timeval tv;
    struct timeval now;

    gettimeofday(&now, NULL);
    tv.tv_sec = ts->tv_sec;
    tv.tv_usec = ts->tv_nsec / 1000;

    if (!rb_w32_time_subtract(&tv, &now))
        return 0;

    return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
}

static int
native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, const struct timespec *ts)
{
    unsigned long timeout_ms;

    timeout_ms = abs_timespec_to_timeout_ms(ts);
    if (!timeout_ms)
        return ETIMEDOUT;

    return native_cond_timedwait_ms(cond, mutex, timeout_ms);
}

static struct timespec native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel);

void
rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
{
    struct timespec rel = {
        .tv_sec = msec / 1000,
        .tv_nsec = (msec % 1000) * 1000 * 1000,
    };
    struct timespec ts = native_cond_timeout(cond, rel);
    native_cond_timedwait(cond, mutex, &ts);
}

static struct timespec
native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
{
    int ret;
    struct timeval tv;
    struct timespec timeout;
    struct timespec now;

    ret = gettimeofday(&tv, 0);
    if (ret != 0)
        rb_sys_fail(0);
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}
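
/* Worked example for the deadline math above (illustrative numbers): with
 * now = {tv_sec = 100, tv_nsec = 900000000} and a relative timeout of
 * {0, 300000000}, the raw sum {100, 1200000000} is normalized by the carry
 * step to {101, 200000000}.  The final comparison clamps tv_sec to
 * TIMET_MAX if the addition wrapped around. */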

void
rb_native_cond_initialize(rb_nativethread_cond_t *cond)
{
    cond->next = (struct cond_event_entry *)cond;
    cond->prev = (struct cond_event_entry *)cond;
}

void
rb_native_cond_destroy(rb_nativethread_cond_t *cond)
{
    /* */
}

void
ruby_init_stack(volatile VALUE *addr)
{
}

#define CHECK_ERR(expr) \
    {if (!(expr)) {rb_bug("err: %lu - %s", GetLastError(), #expr);}}

COMPILER_WARNING_PUSH
#if defined(__GNUC__)
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#endif
static inline SIZE_T
query_memory_basic_info(PMEMORY_BASIC_INFORMATION mi)
{
    return VirtualQuery(mi, mi, sizeof(*mi));
}
COMPILER_WARNING_POP

static void
native_thread_init_stack(rb_thread_t *th)
{
    MEMORY_BASIC_INFORMATION mi;
    char *base, *end;
    DWORD size, space;

    CHECK_ERR(query_memory_basic_info(&mi));
    base = mi.AllocationBase;
    end = mi.BaseAddress;
    end += mi.RegionSize;
    size = end - base;
    space = size / 5;
    if (space > 1024*1024) space = 1024*1024;
    th->ec->machine.stack_start = (VALUE *)end - 1;
    th->ec->machine.stack_maxsize = size - space;
}
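
/* Worked example for the sizing above: for a 1 MiB machine stack region
 * (size = 1048576), space = size / 5 = 209715 bytes is reserved as
 * headroom for stack-overflow handling, giving stack_maxsize = 838861.
 * For regions larger than 5 MiB the headroom is capped at 1 MiB. */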

#ifndef InterlockedExchangePointer
#define InterlockedExchangePointer(t, v) \
    (void *)InterlockedExchange((long *)(t), (long)(v))
#endif
static void
native_thread_destroy(struct rb_native_thread *nt)
{
    if (nt) {
        HANDLE intr = InterlockedExchangePointer(&nt->interrupt_event, 0);
        RUBY_DEBUG_LOG("close handle intr:%p, thid:%p\n", intr, nt->thread_id);
        w32_close_handle(intr);
    }
}

static unsigned long __stdcall
thread_start_func_1(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    volatile HANDLE thread_id = th->nt->thread_id;

    native_thread_init_stack(th);
    th->nt->interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    /* run */
    RUBY_DEBUG_LOG("thread created th:%u, thid: %p, event: %p",
                   rb_th_serial(th), th->nt->thread_id, th->nt->interrupt_event);

    thread_sched_to_running(TH_SCHED(th), th);
    ruby_thread_set_native(th);

    // kick threads
    thread_start_func_2(th, th->ec->machine.stack_start);

    w32_close_handle(thread_id);
    RUBY_DEBUG_LOG("thread deleted th:%u", rb_th_serial(th));

    return 0;
}

static int
native_thread_create(rb_thread_t *th)
{
    // setup nt
    const size_t stack_size = th->vm->default_params.thread_machine_stack_size;
    th->nt = ZALLOC(struct rb_native_thread);
    th->nt->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);

    // setup vm stack
    size_t vm_stack_word_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
    void *vm_stack = ruby_xmalloc(vm_stack_word_size * sizeof(VALUE));
    th->sched.vm_stack = vm_stack;
    rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_word_size);

    if ((th->nt->thread_id) == 0) {
        return thread_errno;
    }

    w32_resume_thread(th->nt->thread_id);

    if (USE_RUBY_DEBUG_LOG) {
        Sleep(0);
        RUBY_DEBUG_LOG("th:%u thid:%p intr:%p, stack size: %"PRIuSIZE"",
                       rb_th_serial(th), th->nt->thread_id,
                       th->nt->interrupt_event, stack_size);
    }
    return 0;
}
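
/* Design note: w32_create_thread() passes CREATE_SUSPENDED, so the new
 * thread cannot enter thread_start_func_1() until the VM stack has been
 * attached to th->ec above; only then does w32_resume_thread() let it run. */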

static void
native_thread_join(HANDLE th)
{
    w32_wait_events(&th, 1, INFINITE, 0);
}

#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
    int priority = th->priority;
    if (th->priority > 0) {
        priority = THREAD_PRIORITY_ABOVE_NORMAL;
    }
    else if (th->priority < 0) {
        priority = THREAD_PRIORITY_BELOW_NORMAL;
    }
    else {
        priority = THREAD_PRIORITY_NORMAL;
    }

    SetThreadPriority(th->nt->thread_id, priority);
}

#endif /* USE_NATIVE_THREAD_PRIORITY */

int rb_w32_select_with_thread(int, fd_set *, fd_set *, fd_set *, struct timeval *, void *); /* @internal */

static int
native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
{
    fd_set *r = NULL, *w = NULL, *e = NULL;
    if (readfds) {
        rb_fd_resize(n - 1, readfds);
        r = rb_fd_ptr(readfds);
    }
    if (writefds) {
        rb_fd_resize(n - 1, writefds);
        w = rb_fd_ptr(writefds);
    }
    if (exceptfds) {
        rb_fd_resize(n - 1, exceptfds);
        e = rb_fd_ptr(exceptfds);
    }
    return rb_w32_select_with_thread(n, r, w, e, timeout, th);
}

/* @internal */
int
rb_w32_check_interrupt(rb_thread_t *th)
{
    return w32_wait_events(0, 0, 0, th);
}

static void
ubf_handle(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    RUBY_DEBUG_LOG("th:%u\n", rb_th_serial(th));

    if (!SetEvent(th->nt->interrupt_event)) {
        w32_error("ubf_handle");
    }
}
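
/* ubf_handle() is the unblock function registered by the BLOCKING_REGION
 * and native_sleep() call sites above: because every interruptible wait in
 * this file also waits on th->nt->interrupt_event, a single SetEvent() is
 * enough to knock the thread out of whatever it is blocked on. */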

int rb_w32_set_thread_description(HANDLE th, const WCHAR *name);
int rb_w32_set_thread_description_str(HANDLE th, VALUE name);
#define native_set_another_thread_name rb_w32_set_thread_description_str

static struct {
    HANDLE id;
    HANDLE lock;
} timer_thread;
#define TIMER_THREAD_CREATED_P() (timer_thread.id != 0)

static unsigned long __stdcall
timer_thread_func(void *dummy)
{
    rb_vm_t *vm = GET_VM();
    RUBY_DEBUG_LOG("start");
    rb_w32_set_thread_description(GetCurrentThread(), L"ruby-timer-thread");
    while (WaitForSingleObject(timer_thread.lock,
                               TIME_QUANTUM_USEC/1000) == WAIT_TIMEOUT) {
        vm->clock++;
        rb_threadptr_check_signal(vm->ractor.main_thread);
    }
    RUBY_DEBUG_LOG("end");
    return 0;
}
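
/* The timer thread doubles as the VM clock and signal pump: each
 * TIME_QUANTUM_USEC/1000 (10 ms) timeout of WaitForSingleObject() bumps
 * vm->clock and delivers pending signals to the main thread.  Signalling
 * timer_thread.lock (a manual-reset event) makes the wait succeed instead
 * of timing out, ending the loop; native_stop_timer_thread() below relies
 * on exactly that. */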

void
rb_thread_wakeup_timer_thread(int sig)
{
    /* do nothing */
}

static void
rb_thread_create_timer_thread(void)
{
    if (timer_thread.id == 0) {
        if (!timer_thread.lock) {
            timer_thread.lock = CreateEvent(0, TRUE, FALSE, 0);
        }
        timer_thread.id = w32_create_thread(1024 + (USE_RUBY_DEBUG_LOG ? BUFSIZ : 0),
                                            timer_thread_func, 0);
        w32_resume_thread(timer_thread.id);
    }
}

static int
native_stop_timer_thread(void)
{
    int stopped = --system_working <= 0;
    if (stopped) {
        SetEvent(timer_thread.lock);
        native_thread_join(timer_thread.id);
        CloseHandle(timer_thread.lock);
        timer_thread.lock = 0;
    }
    return stopped;
}

static void
native_reset_timer_thread(void)
{
    if (timer_thread.id) {
        CloseHandle(timer_thread.id);
        timer_thread.id = 0;
    }
}

int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    return rb_ec_raised_p(th->ec, RAISED_STACKOVERFLOW);
}

#if defined(__MINGW32__)
LONG WINAPI
rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *exception)
{
    if (exception->ExceptionRecord->ExceptionCode == EXCEPTION_STACK_OVERFLOW) {
        rb_ec_raised_set(GET_EC(), RAISED_STACKOVERFLOW);
        raise(SIGSEGV);
    }
    return EXCEPTION_CONTINUE_SEARCH;
}
#endif

#ifdef RUBY_ALLOCA_CHKSTK
void
ruby_alloca_chkstk(size_t len, void *sp)
{
    if (ruby_stack_length(NULL) * sizeof(VALUE) >= len) {
        rb_execution_context_t *ec = GET_EC();
        if (!rb_ec_raised_p(ec, RAISED_STACKOVERFLOW)) {
            rb_ec_raised_set(ec, RAISED_STACKOVERFLOW);
            rb_exc_raise(sysstack_error);
        }
    }
}
#endif

int
rb_reserved_fd_p(int fd)
{
    return 0;
}

rb_nativethread_id_t
rb_nativethread_self(void)
{
    return GetCurrentThread();
}

static void
native_set_thread_name(rb_thread_t *th)
{
}

static VALUE
native_thread_native_thread_id(rb_thread_t *th)
{
    DWORD tid = GetThreadId(th->nt->thread_id);
    if (tid == 0) rb_sys_fail("GetThreadId");
    return ULONG2NUM(tid);
}
#define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1

void
rb_add_running_thread(rb_thread_t *th)
{
    // do nothing
}

void
rb_del_running_thread(rb_thread_t *th)
{
    // do nothing
}

static bool
th_has_dedicated_nt(const rb_thread_t *th)
{
    return true;
}

void
rb_threadptr_sched_free(rb_thread_t *th)
{
    native_thread_destroy(th->nt);
    ruby_xfree(th->nt);
    ruby_xfree(th->sched.vm_stack);
}

void
rb_threadptr_remove(rb_thread_t *th)
{
    // do nothing
}

void
rb_thread_sched_mark_zombies(rb_vm_t *vm)
{
    // do nothing
}

static bool
vm_barrier_finish_p(rb_vm_t *vm)
{
    RUBY_DEBUG_LOG("cnt:%u living:%u blocking:%u",
                   vm->ractor.sync.barrier_cnt,
                   vm->ractor.cnt,
                   vm->ractor.blocking_cnt);

    VM_ASSERT(vm->ractor.blocking_cnt <= vm->ractor.cnt);
    return vm->ractor.blocking_cnt == vm->ractor.cnt;
}

void
rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)
{
    vm->ractor.sync.barrier_waiting = true;

    RUBY_DEBUG_LOG("barrier start. cnt:%u living:%u blocking:%u",
                   vm->ractor.sync.barrier_cnt,
                   vm->ractor.cnt,
                   vm->ractor.blocking_cnt);

    rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);

    // send signal
    rb_ractor_t *r = 0;
    ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
        if (r != cr) {
            rb_ractor_vm_barrier_interrupt_running_thread(r);
        }
    }

    // wait
    while (!vm_barrier_finish_p(vm)) {
        rb_vm_cond_wait(vm, &vm->ractor.sync.barrier_cond);
    }

    RUBY_DEBUG_LOG("cnt:%u barrier success", vm->ractor.sync.barrier_cnt);

    rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);

    vm->ractor.sync.barrier_waiting = false;
    vm->ractor.sync.barrier_cnt++;

    ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
        rb_native_cond_signal(&r->barrier_wait_cond);
    }
}

void
rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr)
{
    vm->ractor.sync.lock_owner = cr;
    unsigned int barrier_cnt = vm->ractor.sync.barrier_cnt;
    rb_thread_t *th = GET_THREAD();
    bool running;

    RB_VM_SAVE_MACHINE_CONTEXT(th);

    if (rb_ractor_status_p(cr, ractor_running)) {
        rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);
        running = true;
    }
    else {
        running = false;
    }
    VM_ASSERT(rb_ractor_status_p(cr, ractor_blocking));

    if (vm_barrier_finish_p(vm)) {
        RUBY_DEBUG_LOG("wakeup barrier owner");
        rb_native_cond_signal(&vm->ractor.sync.barrier_cond);
    }
    else {
        RUBY_DEBUG_LOG("wait for barrier finish");
    }

    // wait for restart
    while (barrier_cnt == vm->ractor.sync.barrier_cnt) {
        vm->ractor.sync.lock_owner = NULL;
        rb_native_cond_wait(&cr->barrier_wait_cond, &vm->ractor.sync.lock);
        VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
        vm->ractor.sync.lock_owner = cr;
    }

    RUBY_DEBUG_LOG("barrier is released. Acquire vm_lock");

    if (running) {
        rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);
    }

    vm->ractor.sync.lock_owner = NULL;
}
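
/* Barrier protocol in brief: the initiating ractor marks itself blocking,
 * interrupts every other ractor, and sleeps on barrier_cond until
 * blocking_cnt == cnt.  Each joining ractor increments blocking_cnt
 * (signalling barrier_cond when it is the last to arrive) and then sleeps
 * on its own barrier_wait_cond until barrier_cnt changes, which happens
 * when the initiator bumps it and signals every ractor above. */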

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */