//! A lock that supports either a single writer or many readers at a time.
//! This API is for kernel threads, not evented I/O.
//! This API requires being initialized at runtime via `init`. Neither
//! initialization nor the core operations can fail.

impl: Impl,

const RwLock = @This();
const std = @import("../std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const Mutex = std.Thread.Mutex;
const Semaphore = std.Semaphore;
const CondVar = std.CondVar;

pub const Impl = if (builtin.single_threaded)
    SingleThreadedRwLock
else if (std.Thread.use_pthreads)
    PthreadRwLock
else
    DefaultRwLock;

pub fn init(rwl: *RwLock) void {
    return rwl.impl.init();
}

pub fn deinit(rwl: *RwLock) void {
    return rwl.impl.deinit();
}

/// Attempts to obtain exclusive lock ownership.
/// Returns `true` if the lock is obtained, `false` otherwise.
pub fn tryLock(rwl: *RwLock) bool {
    return rwl.impl.tryLock();
}

/// Blocks until exclusive lock ownership is acquired.
pub fn lock(rwl: *RwLock) void {
    return rwl.impl.lock();
}

/// Releases a held exclusive lock.
/// Asserts the lock is held exclusively.
pub fn unlock(rwl: *RwLock) void {
    return rwl.impl.unlock();
}

/// Attempts to obtain shared lock ownership.
/// Returns `true` if the lock is obtained, `false` otherwise.
pub fn tryLockShared(rwl: *RwLock) bool {
    return rwl.impl.tryLockShared();
}

/// Blocks until shared lock ownership is acquired.
pub fn lockShared(rwl: *RwLock) void {
    return rwl.impl.lockShared();
}

/// Releases a held shared lock.
pub fn unlockShared(rwl: *RwLock) void {
    return rwl.impl.unlockShared();
}
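
// A minimal smoke-test sketch of the wrapper API above; single-threaded and
// uncontended, so nothing blocks, and it should hold for any `Impl`.
test "RwLock smoke test" {
    var rwl: RwLock = undefined;
    rwl.init();
    defer rwl.deinit();

    rwl.lock();
    rwl.unlock();

    rwl.lockShared();
    rwl.lockShared();
    rwl.unlockShared();
    rwl.unlockShared();

    assert(rwl.tryLock());
    rwl.unlock();

    assert(rwl.tryLockShared());
    rwl.unlockShared();
}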

/// Single-threaded applications use this implementation for deadlock and
/// misuse detection: the checks are active in safe build modes (Debug and
/// ReleaseSafe) and compile to no-ops in unsafe release modes.
pub const SingleThreadedRwLock = struct {
    state: enum { unlocked, locked_exclusive, locked_shared },
    shared_count: usize,

    pub fn init(rwl: *SingleThreadedRwLock) void {
        rwl.* = .{
            .state = .unlocked,
            .shared_count = 0,
        };
    }

    pub fn deinit(rwl: *SingleThreadedRwLock) void {
        assert(rwl.state == .unlocked);
        assert(rwl.shared_count == 0);
    }

    /// Attempts to obtain exclusive lock ownership.
    /// Returns `true` if the lock is obtained, `false` otherwise.
    pub fn tryLock(rwl: *SingleThreadedRwLock) bool {
        switch (rwl.state) {
            .unlocked => {
                assert(rwl.shared_count == 0);
                rwl.state = .locked_exclusive;
                return true;
            },
            .locked_exclusive, .locked_shared => return false,
        }
    }

    /// Blocks until exclusive lock ownership is acquired.
    pub fn lock(rwl: *SingleThreadedRwLock) void {
        assert(rwl.state == .unlocked); // deadlock detected
        assert(rwl.shared_count == 0); // corrupted state detected
        rwl.state = .locked_exclusive;
    }

    /// Releases a held exclusive lock.
    /// Asserts the lock is held exclusively.
    pub fn unlock(rwl: *SingleThreadedRwLock) void {
        assert(rwl.state == .locked_exclusive);
        assert(rwl.shared_count == 0); // corrupted state detected
        rwl.state = .unlocked;
    }

    /// Attempts to obtain shared lock ownership.
    /// Returns `true` if the lock is obtained, `false` otherwise.
    pub fn tryLockShared(rwl: *SingleThreadedRwLock) bool {
        switch (rwl.state) {
            .unlocked => {
                rwl.state = .locked_shared;
                assert(rwl.shared_count == 0);
                rwl.shared_count = 1;
                return true;
            },
            // Additional shared acquisitions must succeed, matching the
            // multi-threaded implementations below.
            .locked_shared => {
                rwl.shared_count += 1;
                return true;
            },
            .locked_exclusive => return false,
        }
    }

    /// Blocks until shared lock ownership is acquired.
    pub fn lockShared(rwl: *SingleThreadedRwLock) void {
        switch (rwl.state) {
            .unlocked => {
                rwl.state = .locked_shared;
                assert(rwl.shared_count == 0);
                rwl.shared_count = 1;
            },
            .locked_shared => {
                rwl.shared_count += 1;
            },
            .locked_exclusive => unreachable, // deadlock detected
        }
    }

    /// Releases a held shared lock.
    pub fn unlockShared(rwl: *SingleThreadedRwLock) void {
        switch (rwl.state) {
            .unlocked => unreachable, // too many calls to `unlockShared`
            .locked_exclusive => unreachable, // exclusively held lock
            .locked_shared => {
                rwl.shared_count -= 1;
                if (rwl.shared_count == 0) {
                    rwl.state = .unlocked;
                }
            },
        }
    }
};
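
// A sketch exercising SingleThreadedRwLock's state machine directly, no
// matter which `Impl` the target selects.
test "SingleThreadedRwLock state transitions" {
    var rwl: SingleThreadedRwLock = undefined;
    rwl.init();
    defer rwl.deinit();

    rwl.lockShared();
    assert(rwl.tryLockShared()); // additional readers may pile on
    assert(!rwl.tryLock()); // readers exclude writers
    rwl.unlockShared();
    rwl.unlockShared();

    assert(rwl.tryLock());
    assert(!rwl.tryLockShared()); // a writer excludes readers
    rwl.unlock();
}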

pub const PthreadRwLock = struct {
    rwlock: std.c.pthread_rwlock_t,

    pub fn init(rwl: *PthreadRwLock) void {
        rwl.* = .{ .rwlock = .{} };
    }

    pub fn deinit(rwl: *PthreadRwLock) void {
        const safe_rc: std.os.E = switch (builtin.os.tag) {
            .dragonfly, .netbsd => .AGAIN,
            else => .SUCCESS,
        };
        const rc = std.c.pthread_rwlock_destroy(&rwl.rwlock);
        assert(rc == .SUCCESS or rc == safe_rc);
        rwl.* = undefined;
    }

    pub fn tryLock(rwl: *PthreadRwLock) bool {
        return std.c.pthread_rwlock_trywrlock(&rwl.rwlock) == .SUCCESS;
    }

    pub fn lock(rwl: *PthreadRwLock) void {
        const rc = std.c.pthread_rwlock_wrlock(&rwl.rwlock);
        assert(rc == .SUCCESS);
    }

    pub fn unlock(rwl: *PthreadRwLock) void {
        const rc = std.c.pthread_rwlock_unlock(&rwl.rwlock);
        assert(rc == .SUCCESS);
    }

    pub fn tryLockShared(rwl: *PthreadRwLock) bool {
        return std.c.pthread_rwlock_tryrdlock(&rwl.rwlock) == .SUCCESS;
    }

    pub fn lockShared(rwl: *PthreadRwLock) void {
        const rc = std.c.pthread_rwlock_rdlock(&rwl.rwlock);
        assert(rc == .SUCCESS);
    }

    pub fn unlockShared(rwl: *PthreadRwLock) void {
        const rc = std.c.pthread_rwlock_unlock(&rwl.rwlock);
        assert(rc == .SUCCESS);
    }
};
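
/// A lock built from a mutex (serializing writers), a semaphore (parking a
/// writer until the last reader leaves), and a packed atomic `state` word
/// whose layout is described below. Readers arriving while a writer is
/// active or queued fall back to the mutex, making the lock roughly
/// writer-preferring.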
pub const DefaultRwLock = struct {
    state: usize,
    mutex: Mutex,
    semaphore: Semaphore,
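
    // Bit layout of `state`, with Count = (bitCount(usize) - 1) / 2 bits per
    // counter (31 bits each on a 64-bit target):
    //
    //   | unused | reader count: Count | writer count: Count | IS_WRITING |
    //
    // IS_WRITING marks an active exclusive holder, the writer count tracks
    // writers queued on `mutex`, and the reader count tracks active shared
    // holders.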
    const IS_WRITING: usize = 1;
    const WRITER: usize = 1 << 1;
    const READER: usize = 1 << (1 + std.meta.bitCount(Count));
    const WRITER_MASK: usize = std.math.maxInt(Count) << @ctz(usize, WRITER);
    const READER_MASK: usize = std.math.maxInt(Count) << @ctz(usize, READER);
    const Count = std.meta.Int(.unsigned, @divFloor(std.meta.bitCount(usize) - 1, 2));

    pub fn init(rwl: *DefaultRwLock) void {
        rwl.* = .{
            .state = 0,
            .mutex = Mutex.init(),
            .semaphore = Semaphore.init(0),
        };
    }

    pub fn deinit(rwl: *DefaultRwLock) void {
        rwl.semaphore.deinit();
        rwl.mutex.deinit();
        rwl.* = undefined;
    }
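
    // The writer path keeps `mutex` held for the whole exclusive critical
    // section: a successful `tryLock` (or `lock`) retains it until `unlock`.
    // With the mutex held, only the reader count still needs checking.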
    pub fn tryLock(rwl: *DefaultRwLock) bool {
        if (rwl.mutex.tryLock()) {
            const state = @atomicLoad(usize, &rwl.state, .SeqCst);
            if (state & READER_MASK == 0) {
                _ = @atomicRmw(usize, &rwl.state, .Or, IS_WRITING, .SeqCst);
                return true;
            }

            rwl.mutex.unlock();
        }

        return false;
    }
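
    // A writer first bumps the writer count, which diverts incoming readers
    // off their fast path, then acquires `mutex` to exclude other writers.
    // Once the mutex is held it sets IS_WRITING and, if readers are still
    // active, sleeps on `semaphore` until the last reader posts it in
    // `unlockShared`.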
    pub fn lock(rwl: *DefaultRwLock) void {
        _ = @atomicRmw(usize, &rwl.state, .Add, WRITER, .SeqCst);
        rwl.mutex.lock();

        // Set IS_WRITING and retire this writer's queue entry in a single
        // atomic update (IS_WRITING -% WRITER wraps to a net -1), so the
        // writer count cannot leak and eventually overflow into READER_MASK.
        const state = @atomicRmw(usize, &rwl.state, .Add, IS_WRITING -% WRITER, .SeqCst);
        if (state & READER_MASK != 0)
            rwl.semaphore.wait();
    }

    pub fn unlock(rwl: *DefaultRwLock) void {
        _ = @atomicRmw(usize, &rwl.state, .And, ~IS_WRITING, .SeqCst);
        rwl.mutex.unlock();
    }
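
    // Non-blocking in both phases: first a single CAS attempt on the reader
    // fast path (valid only while no writer is active or queued), then a
    // `tryLock` of `mutex` to register the reader while briefly excluding
    // writers.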
    pub fn tryLockShared(rwl: *DefaultRwLock) bool {
        const state = @atomicLoad(usize, &rwl.state, .SeqCst);
        if (state & (IS_WRITING | WRITER_MASK) == 0) {
            _ = @cmpxchgStrong(
                usize,
                &rwl.state,
                state,
                state + READER,
                .SeqCst,
                .SeqCst,
            ) orelse return true;
        }

        if (rwl.mutex.tryLock()) {
            _ = @atomicRmw(usize, &rwl.state, .Add, READER, .SeqCst);
            rwl.mutex.unlock();
            return true;
        }

        return false;
    }
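
    // Fast path: CAS-loop the reader count upward while no writer is active
    // or queued. Otherwise block on `mutex` (waiting out the current
    // exclusive holder and queued writers), register the reader, and
    // release the mutex right away.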
    pub fn lockShared(rwl: *DefaultRwLock) void {
        var state = @atomicLoad(usize, &rwl.state, .SeqCst);
        while (state & (IS_WRITING | WRITER_MASK) == 0) {
            state = @cmpxchgWeak(
                usize,
                &rwl.state,
                state,
                state + READER,
                .SeqCst,
                .SeqCst,
            ) orelse return;
        }

        rwl.mutex.lock();
        _ = @atomicRmw(usize, &rwl.state, .Add, READER, .SeqCst);
        rwl.mutex.unlock();
    }
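
    // If this was the last active reader and a writer has already set
    // IS_WRITING (and gone to sleep in `lock`), hand the lock over by
    // posting the semaphore.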
    pub fn unlockShared(rwl: *DefaultRwLock) void {
        const state = @atomicRmw(usize, &rwl.state, .Sub, READER, .SeqCst);

        if ((state & READER_MASK == READER) and (state & IS_WRITING != 0))
            rwl.semaphore.post();
    }
};
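
// A single-threaded, uncontended sketch of DefaultRwLock used in isolation;
// nothing here ever blocks on the mutex or semaphore.
test "DefaultRwLock uncontended smoke test" {
    var rwl: DefaultRwLock = undefined;
    rwl.init();
    defer rwl.deinit();

    assert(rwl.tryLockShared());
    assert(!rwl.tryLock()); // an active reader turns writers away
    rwl.unlockShared();

    rwl.lock();
    assert(!rwl.tryLockShared()); // an active writer turns readers away
    rwl.unlock();
}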