/* Public domain. */

#ifndef _LINUX_SPINLOCK_H
#define _LINUX_SPINLOCK_H

#include <linux/spinlock_types.h>
#include <linux/preempt.h>
#include <linux/bottom_half.h>
#include <linux/atomic.h>
#include <linux/lockdep.h>

/*
 * Interrupt flags are not tracked on this port: _flags is zeroed only
 * so the caller's variable is initialized before the matching
 * spin_unlock_irqrestore(); the lock itself is a plain mtx_enter().
 * The argument is parenthesized for macro hygiene.
 */
#define spin_lock_irqsave(_mtxp, _flags) do {	\
	(_flags) = 0;				\
	mtx_enter(_mtxp);			\
} while (0)
16
/*
 * Lockdep subclass annotations carry no meaning here: the subclass is
 * evaluated once (preserving any side effects) and discarded, then the
 * lock is taken exactly as spin_lock_irqsave() does.  Arguments are
 * parenthesized for macro hygiene.
 */
#define spin_lock_irqsave_nested(_mtxp, _flags, _subclass) do {	\
	(void)(_subclass);					\
	(_flags) = 0;						\
	mtx_enter(_mtxp);					\
} while (0)
22
/*
 * Release the mutex; the saved flags word carries no state on this
 * port and is only touched to keep it "used".
 */
#define spin_unlock_irqrestore(_mtxp, _flags) do {	\
	mtx_leave(_mtxp);				\
	(void)(_flags);					\
} while (0)
27
/*
 * Attempt to take the mutex without blocking; evaluates to 1 on
 * success and 0 when the lock is already held.
 */
#define spin_trylock(_mtxp)	\
	(mtx_enter_try(_mtxp) ? 1 : 0)
32
/*
 * Like spin_trylock(), but also initializes the caller's flags word so
 * it matches spin_lock_irqsave() (which zeroes it) instead of leaving
 * it uninitialized; no interrupt state is actually saved on this port.
 * Evaluates to 1 when the lock was acquired, 0 otherwise.
 */
#define spin_trylock_irqsave(_mtxp, _flags)	\
({						\
	(_flags) = 0;				\
	mtx_enter_try(_mtxp) ? 1 : 0;		\
})
38
/*
 * Decrement *v; when the decrement would take the count to zero,
 * acquire the mutex first and return 1 with the lock held, otherwise
 * just decrement and return 0.
 *
 * NOTE(review): the `*v != 1` read and the following atomic_dec() are
 * not one atomic operation, so a concurrent decrement in that window
 * could drop the count to zero without the lock being taken -- confirm
 * that callers on this port cannot race here, or that a CAS-based
 * implementation is warranted.
 */
static inline int
atomic_dec_and_lock(volatile int *v, struct mutex *mtxp)
{
	/* Fast path: count stays nonzero, no lock needed. */
	if (*v != 1) {
		atomic_dec(v);
		return 0;
	}

	/* Count is reaching zero: take the lock, then drop the count. */
	mtx_enter(mtxp);
	atomic_dec(v);
	return 1;
}
51
/* Flags are ignored on this port; identical to atomic_dec_and_lock(). */
#define atomic_dec_and_lock_irqsave(_a, _mtxp, _flags) \
	atomic_dec_and_lock(_a, _mtxp)
54
/*
 * All remaining Linux spinlock operations map directly onto the plain
 * mutex primitives: the _irq variants need no extra interrupt work
 * here, and lock nesting annotations are dropped.
 */
#define spin_lock(mtxp)			mtx_enter(mtxp)
#define spin_lock_nested(mtxp, l)	mtx_enter(mtxp)
#define spin_unlock(mtxp)		mtx_leave(mtxp)
#define spin_lock_irq(mtxp)		mtx_enter(mtxp)
#define spin_unlock_irq(mtxp)		mtx_leave(mtxp)
#define assert_spin_locked(mtxp)	MUTEX_ASSERT_LOCKED(mtxp)
#define spin_trylock_irq(mtxp)		mtx_enter_try(mtxp)

/* Reader/writer locks degrade to exclusive mutex locks on this port. */
#define read_lock(mtxp)			mtx_enter(mtxp)
#define read_unlock(mtxp)		mtx_leave(mtxp)
#define write_lock(mtxp)		mtx_enter(mtxp)
#define write_unlock(mtxp)		mtx_leave(mtxp)
67
#endif /* _LINUX_SPINLOCK_H */