Ruby 3.2.3p157 (2024-01-18 revision 52bb2ac0a6971d0391efa2275f7a66bff319087c)
vm_sync.c
#include "vm_core.h"
#include "vm_sync.h"
#include "ractor_core.h"
#include "vm_debug.h"
#include "gc.h"

static bool vm_barrier_finish_p(rb_vm_t *vm);

static bool
vm_locked(rb_vm_t *vm)
{
    return vm->ractor.sync.lock_owner == GET_RACTOR();
}

#if RUBY_DEBUG > 0
void
RUBY_ASSERT_vm_locking(void)
{
    if (rb_multi_ractor_p()) {
        rb_vm_t *vm = GET_VM();
        VM_ASSERT(vm_locked(vm));
    }
}

void
RUBY_ASSERT_vm_unlocking(void)
{
    if (rb_multi_ractor_p()) {
        rb_vm_t *vm = GET_VM();
        VM_ASSERT(!vm_locked(vm));
    }
}
#endif

bool
rb_vm_locked_p(void)
{
    return vm_locked(GET_VM());
}

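/*
 * vm_lock_enter()/vm_lock_leave() below implement the process-wide VM lock.
 * The lock is recursive: vm->ractor.sync.lock_rec counts the nesting depth
 * and vm->ractor.sync.lock_owner records the owning ractor.  Unless
 * `no_barrier` is given, a ractor that acquires the lock while
 * rb_vm_barrier() is in progress first counts itself as blocking and waits
 * until the current barrier generation (barrier_cnt) has finished.
 */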
static void
vm_lock_enter(rb_ractor_t *cr, rb_vm_t *vm, bool locked, bool no_barrier, unsigned int *lev APPEND_LOCATION_ARGS)
{
    RUBY_DEBUG_LOG2(file, line, "start locked:%d", locked);

    if (locked) {
        ASSERT_vm_locking();
    }
    else {
#if RACTOR_CHECK_MODE
        // locking a ractor and then acquiring the VM lock would cause a deadlock
        VM_ASSERT(cr->sync.locked_by != rb_ractor_self(cr));
#endif

        // lock
        rb_native_mutex_lock(&vm->ractor.sync.lock);
        VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
        vm->ractor.sync.lock_owner = cr;

        if (!no_barrier) {
            // barrier
            while (vm->ractor.sync.barrier_waiting) {
                unsigned int barrier_cnt = vm->ractor.sync.barrier_cnt;
                rb_thread_t *th = GET_THREAD();
                bool running;

                RB_GC_SAVE_MACHINE_CONTEXT(th);

                if (rb_ractor_status_p(cr, ractor_running)) {
                    rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);
                    running = true;
                }
                else {
                    running = false;
                }
                VM_ASSERT(rb_ractor_status_p(cr, ractor_blocking));

                if (vm_barrier_finish_p(vm)) {
                    RUBY_DEBUG_LOG("wakeup barrier owner");
                    rb_native_cond_signal(&vm->ractor.sync.barrier_cond);
                }
                else {
                    RUBY_DEBUG_LOG("wait for barrier finish");
                }

                // wait for restart
                while (barrier_cnt == vm->ractor.sync.barrier_cnt) {
                    vm->ractor.sync.lock_owner = NULL;
                    rb_native_cond_wait(&cr->barrier_wait_cond, &vm->ractor.sync.lock);
                    VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
                    vm->ractor.sync.lock_owner = cr;
                }

                RUBY_DEBUG_LOG("barrier is released. Acquire vm_lock");

                if (running) {
                    rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);
                }
            }
        }

        VM_ASSERT(vm->ractor.sync.lock_rec == 0);
        VM_ASSERT(vm->ractor.sync.lock_owner == cr);
    }

    vm->ractor.sync.lock_rec++;
    *lev = vm->ractor.sync.lock_rec;

    RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%u", vm->ractor.sync.lock_rec,
                    (unsigned int)rb_ractor_id(vm->ractor.sync.lock_owner));
}

static void
vm_lock_leave(rb_vm_t *vm, unsigned int *lev APPEND_LOCATION_ARGS)
{
    RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%u", vm->ractor.sync.lock_rec,
                    (unsigned int)rb_ractor_id(vm->ractor.sync.lock_owner));

    ASSERT_vm_locking();
    VM_ASSERT(vm->ractor.sync.lock_rec > 0);
    VM_ASSERT(vm->ractor.sync.lock_rec == *lev);

    vm->ractor.sync.lock_rec--;
    *lev = vm->ractor.sync.lock_rec;

    if (vm->ractor.sync.lock_rec == 0) {
        vm->ractor.sync.lock_owner = NULL;
        rb_native_mutex_unlock(&vm->ractor.sync.lock);
    }
}

MJIT_FUNC_EXPORTED void
rb_vm_lock_enter_body(unsigned int *lev APPEND_LOCATION_ARGS)
{
    rb_vm_t *vm = GET_VM();
    if (vm_locked(vm)) {
        vm_lock_enter(NULL, vm, true, false, lev APPEND_LOCATION_PARAMS);
    }
    else {
        vm_lock_enter(GET_RACTOR(), vm, false, false, lev APPEND_LOCATION_PARAMS);
    }
}

MJIT_FUNC_EXPORTED void
rb_vm_lock_enter_body_nb(unsigned int *lev APPEND_LOCATION_ARGS)
{
    rb_vm_t *vm = GET_VM();
    if (vm_locked(vm)) {
        vm_lock_enter(NULL, vm, true, true, lev APPEND_LOCATION_PARAMS);
    }
    else {
        vm_lock_enter(GET_RACTOR(), vm, false, true, lev APPEND_LOCATION_PARAMS);
    }
}

MJIT_FUNC_EXPORTED void
rb_vm_lock_enter_body_cr(rb_ractor_t *cr, unsigned int *lev APPEND_LOCATION_ARGS)
{
    rb_vm_t *vm = GET_VM();
    vm_lock_enter(cr, vm, vm_locked(vm), false, lev APPEND_LOCATION_PARAMS);
}

MJIT_FUNC_EXPORTED void
rb_vm_lock_leave_body(unsigned int *lev APPEND_LOCATION_ARGS)
{
    vm_lock_leave(GET_VM(), lev APPEND_LOCATION_PARAMS);
}

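/*
 * Illustrative sketch, not part of vm_sync.c: callers normally do not use
 * the *_body functions above directly.  Assuming the RB_VM_LOCK_ENTER() /
 * RB_VM_LOCK_LEAVE() macros from vm_sync.h, which record the call site and
 * route to rb_vm_lock_enter_body()/rb_vm_lock_leave_body() when more than
 * one ractor exists, a critical section over shared VM state looks roughly
 * like this (hypothetical caller name):
 *
 *     static void
 *     example_touch_shared_vm_state(void)
 *     {
 *         RB_VM_LOCK_ENTER();
 *         {
 *             // shared VM data may be inspected or mutated here; the
 *             // enter/leave pair nests, so callees that also take the
 *             // lock simply bump lock_rec instead of deadlocking.
 *         }
 *         RB_VM_LOCK_LEAVE();
 *     }
 */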
void
rb_vm_lock_body(LOCATION_ARGS)
{
    rb_vm_t *vm = GET_VM();
    ASSERT_vm_unlocking();

    vm_lock_enter(GET_RACTOR(), vm, false, false, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
}

void
rb_vm_unlock_body(LOCATION_ARGS)
{
    rb_vm_t *vm = GET_VM();
    ASSERT_vm_locking();
    VM_ASSERT(vm->ractor.sync.lock_rec == 1);
    vm_lock_leave(vm, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
}

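/*
 * vm_cond_wait() below waits on a condition variable while the VM lock is
 * held.  rb_native_cond_wait()/rb_native_cond_timedwait() release the
 * underlying native mutex for the duration of the wait, so the function
 * stashes lock_rec and lock_owner beforehand and restores them once the
 * wait returns and the mutex has been re-acquired.
 */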
static void
vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
{
    ASSERT_vm_locking();
    unsigned int lock_rec = vm->ractor.sync.lock_rec;
    rb_ractor_t *cr = vm->ractor.sync.lock_owner;

    vm->ractor.sync.lock_rec = 0;
    vm->ractor.sync.lock_owner = NULL;
    if (msec > 0) {
        rb_native_cond_timedwait(cond, &vm->ractor.sync.lock, msec);
    }
    else {
        rb_native_cond_wait(cond, &vm->ractor.sync.lock);
    }
    vm->ractor.sync.lock_rec = lock_rec;
    vm->ractor.sync.lock_owner = cr;
}

void
rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond)
{
    vm_cond_wait(vm, cond, 0);
}

void
rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
{
    vm_cond_wait(vm, cond, msec);
}

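/*
 * Illustrative sketch, not part of vm_sync.c: as with any condition
 * variable, rb_vm_cond_wait() is meant to be called with the VM lock held
 * and inside a loop that re-checks its predicate, e.g. (hypothetical
 * predicate and condvar names):
 *
 *     RB_VM_LOCK_ENTER();
 *     {
 *         while (!some_shared_flag) {           // hypothetical predicate
 *             rb_vm_cond_wait(vm, &some_cond);  // hypothetical condvar
 *         }
 *     }
 *     RB_VM_LOCK_LEAVE();
 *
 * rb_vm_barrier() below uses exactly this pattern with barrier_cond and
 * vm_barrier_finish_p().
 */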
static bool
vm_barrier_finish_p(rb_vm_t *vm)
{
    RUBY_DEBUG_LOG("cnt:%u living:%u blocking:%u",
                   vm->ractor.sync.barrier_cnt,
                   vm->ractor.cnt,
                   vm->ractor.blocking_cnt);

    VM_ASSERT(vm->ractor.blocking_cnt <= vm->ractor.cnt);
    return vm->ractor.blocking_cnt == vm->ractor.cnt;
}

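/*
 * rb_vm_barrier() below stops every other ractor at a safe point: the
 * caller (which must already hold the VM lock) counts itself as blocking,
 * interrupts the running threads of all other ractors, and waits on
 * barrier_cond until vm_barrier_finish_p() reports that every living
 * ractor is blocking.  It then clears barrier_waiting, bumps barrier_cnt,
 * and signals each ractor's barrier_wait_cond so that ractors parked in
 * vm_lock_enter() can resume.
 */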
void
rb_vm_barrier(void)
{
    RB_DEBUG_COUNTER_INC(vm_sync_barrier);

    if (!rb_multi_ractor_p()) {
        // no other ractors
        return;
    }
    else {
        rb_vm_t *vm = GET_VM();
        VM_ASSERT(vm->ractor.sync.barrier_waiting == false);
        ASSERT_vm_locking();

        rb_ractor_t *cr = vm->ractor.sync.lock_owner;
        VM_ASSERT(cr == GET_RACTOR());
        VM_ASSERT(rb_ractor_status_p(cr, ractor_running));

        vm->ractor.sync.barrier_waiting = true;

        RUBY_DEBUG_LOG("barrier start. cnt:%u living:%u blocking:%u",
                       vm->ractor.sync.barrier_cnt,
                       vm->ractor.cnt,
                       vm->ractor.blocking_cnt);

        rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);

        // send signal
        rb_ractor_t *r = 0;
        ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
            if (r != cr) {
                rb_ractor_vm_barrier_interrupt_running_thread(r);
            }
        }

        // wait
        while (!vm_barrier_finish_p(vm)) {
            rb_vm_cond_wait(vm, &vm->ractor.sync.barrier_cond);
        }

        RUBY_DEBUG_LOG("cnt:%u barrier success", vm->ractor.sync.barrier_cnt);

        rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);

        vm->ractor.sync.barrier_waiting = false;
        vm->ractor.sync.barrier_cnt++;

        ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
            rb_native_cond_signal(&r->barrier_wait_cond);
        }
    }
}

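/*
 * Illustrative sketch, not part of vm_sync.c: a typical barrier user takes
 * the VM lock and then calls rb_vm_barrier() before touching state that no
 * other ractor may be executing against (hypothetical helper name):
 *
 *     RB_VM_LOCK_ENTER();
 *     {
 *         rb_vm_barrier();                // every other ractor is parked
 *         example_patch_global_tables();  // hypothetical exclusive work
 *     }
 *     RB_VM_LOCK_LEAVE();
 */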
void
rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
                          unsigned int recorded_lock_rec,
                          unsigned int current_lock_rec)
{
    VM_ASSERT(recorded_lock_rec != current_lock_rec);

    if (UNLIKELY(recorded_lock_rec > current_lock_rec)) {
        rb_bug("unexpected situation - recorded:%u current:%u",
               recorded_lock_rec, current_lock_rec);
    }
    else {
        while (recorded_lock_rec < current_lock_rec) {
            RB_VM_LOCK_LEAVE_LEV(&current_lock_rec);
        }
    }

    VM_ASSERT(recorded_lock_rec == rb_ec_vm_lock_rec(ec));
}
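
/*
 * rb_ec_vm_lock_rec_release() above reconciles the VM-lock recursion level
 * with a previously recorded value: it calls RB_VM_LOCK_LEAVE_LEV() until
 * current_lock_rec drops back to recorded_lock_rec, and treats a recorded
 * value larger than the current one as a fatal inconsistency (rb_bug).
 */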
Referenced functions:
  void rb_bug(const char *fmt, ...): interpreter panic switch (defined in error.c:794).
  void rb_native_mutex_lock(rb_nativethread_lock_t *lock): another name for rb_nativethread_lock_lock.
  void rb_native_mutex_unlock(rb_nativethread_lock_t *lock): another name for rb_nativethread_lock_unlock.
  void rb_native_cond_signal(rb_nativethread_cond_t *cond): signals a condition variable.
  void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex): waits for the passed condition variable to be signalled.
  void rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec): identical to rb_native_cond_wait(), except it additionally takes a timeout in milliseconds.