/* Public domain. */

#ifndef _LINUX_SPINLOCK_H
#define _LINUX_SPINLOCK_H

#include <linux/kernel.h>
#include <linux/spinlock_types.h>
#include <linux/preempt.h>
#include <linux/bottom_half.h>
#include <linux/atomic.h>

/*
 * Linux spinlock compatibility shims built on OpenBSD mutexes.
 * mtx_enter() already blocks interrupts at the mutex's IPL, so the
 * Linux "irqsave" flags are unnecessary; they are zeroed and ignored.
 */

#define spin_lock_irqsave(_mtxp, _flags) do { \
	_flags = 0; \
	mtx_enter(_mtxp); \
} while (0)

#define spin_lock_irqsave_nested(_mtxp, _flags, _subclass) do { \
	(void)(_subclass); \
	_flags = 0; \
	mtx_enter(_mtxp); \
} while (0)

#define spin_unlock_irqrestore(_mtxp, _flags) do { \
	(void)(_flags); \
	mtx_leave(_mtxp); \
} while (0)

#define spin_trylock_irqsave(_mtxp, _flags) \
({ \
	(void)(_flags); \
	mtx_enter_try(_mtxp) ? 1 : 0; \
})

/*
 * Decrement *v; if it would reach zero, take the mutex before the
 * final decrement and return 1 with the mutex held.  Otherwise just
 * decrement and return 0.
 */
static inline int
atomic_dec_and_lock(volatile int *v, struct mutex *mtxp)
{
	if (*v != 1) {
		atomic_dec(v);
		return 0;
	}

	mtx_enter(mtxp);
	atomic_dec(v);
	return 1;
}

#define atomic_dec_and_lock_irqsave(_a, _mtxp, _flags) \
	atomic_dec_and_lock(_a, _mtxp)

#define spin_lock(mtxp)			mtx_enter(mtxp)
#define spin_lock_nested(mtxp, l)	mtx_enter(mtxp)
#define spin_unlock(mtxp)		mtx_leave(mtxp)
#define spin_lock_irq(mtxp)		mtx_enter(mtxp)
#define spin_unlock_irq(mtxp)		mtx_leave(mtxp)
#define assert_spin_locked(mtxp)	MUTEX_ASSERT_LOCKED(mtxp)
#define spin_trylock_irq(mtxp)		mtx_enter_try(mtxp)

/* Reader/writer locks are collapsed onto the same exclusive mutex. */
#define read_lock(mtxp)			mtx_enter(mtxp)
#define read_unlock(mtxp)		mtx_leave(mtxp)
#define write_lock(mtxp)		mtx_enter(mtxp)
#define write_unlock(mtxp)		mtx_leave(mtxp)

#endif
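
/*
 * Usage sketch: how a Linux-style caller exercises this shim.
 * `example_lock` is a hypothetical struct mutex assumed to be
 * initialized elsewhere with mtx_init(); `flags` is accepted only
 * for source compatibility and is ignored, since mtx_enter()
 * already raises the IPL recorded in the mutex.
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&example_lock, flags);
 *	... critical section, runs at the mutex's IPL ...
 *	spin_unlock_irqrestore(&example_lock, flags);
 */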