/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/rcuwait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>

struct percpu_rw_semaphore {
	struct rcu_sync		rss;		/* readers use the fast path iff idle */
	unsigned int __percpu	*read_count;	/* per-cpu fast-path reader count */
	struct rw_semaphore	rw_sem;		/* slowpath */
	struct rcuwait		writer;		/* blocked writer */
	int			readers_block;
};

#define DEFINE_STATIC_PERCPU_RWSEM(name)				\
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);		\
static struct percpu_rw_semaphore name = {				\
	.rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC),	\
	.read_count = &__percpu_rwsem_rc_##name,			\
	.rw_sem = __RWSEM_INITIALIZER(name.rw_sem),			\
	.writer = __RCUWAIT_INITIALIZER(name.writer),			\
}

extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
extern void __percpu_up_read(struct percpu_rw_semaphore *);

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	might_sleep();

	rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);

	preempt_disable();
	/*
	 * We are in an RCU-sched read-side critical section, so the writer
	 * cannot both leave the rcu_sync idle (readers-fast) state and start
	 * checking counters while we are here. So if we see the rcu_sync as
	 * idle, we know that the writer won't be checking until we're past
	 * the preempt_enable() and that once the synchronize_rcu() is done,
	 * the writer will see anything we did within this RCU-sched
	 * read-side critical section.
	 */
	__this_cpu_inc(*sem->read_count);
	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
		__percpu_down_read(sem, false); /* Unconditional memory barrier */
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */
	preempt_enable();
}

static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	int ret = 1;

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	__this_cpu_inc(*sem->read_count);
	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
	preempt_enable();
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */

	if (ret)
		rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);

	return ret;
}

static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
	preempt_disable();
	/*
	 * Same as in percpu_down_read().
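	 *
	 * If no writer is pending, a plain per-cpu decrement pairs with the
	 * increment done in percpu_down_read(); otherwise __percpu_up_read()
	 * must also wake up a writer that may be waiting for the per-cpu
	 * reader counts to drain.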
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		__this_cpu_dec(*sem->read_count);
	else
		__percpu_up_read(sem); /* Unconditional memory barrier */
	preempt_enable();

	rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
}

extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);

extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
				const char *, struct lock_class_key *);

extern void percpu_free_rwsem(struct percpu_rw_semaphore *);

#define percpu_init_rwsem(sem)					\
({								\
	static struct lock_class_key rwsem_key;		\
	__percpu_init_rwsem(sem, #sem, &rwsem_key);		\
})

#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)

#define percpu_rwsem_assert_held(sem)				\
	lockdep_assert_held(&(sem)->rw_sem)

/*
 * Hand the lock's lockdep "ownership" over to another context: release the
 * dep_map state here and re-acquire it elsewhere, without touching the
 * semaphore itself (used e.g. when a filesystem is frozen by one task and
 * thawed by another).
 */
static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_release(&sem->rw_sem.dep_map, 1, ip);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	if (!read)
		sem->rw_sem.owner = RWSEM_OWNER_UNKNOWN;
#endif
}

static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	if (!read)
		sem->rw_sem.owner = current;
#endif
}

#endif /* _LINUX_PERCPU_RWSEM_H */
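
/*
 * Usage sketch (illustrative only; frob_lock, frob_count, frob_read() and
 * frob_write() are hypothetical names, not part of this API):
 *
 *	DEFINE_STATIC_PERCPU_RWSEM(frob_lock);
 *	static unsigned long frob_count;
 *
 *	void frob_read(void)
 *	{
 *		percpu_down_read(&frob_lock);
 *		pr_info("count=%lu\n", frob_count);
 *		percpu_up_read(&frob_lock);
 *	}
 *
 *	void frob_write(void)
 *	{
 *		percpu_down_write(&frob_lock);
 *		frob_count++;
 *		percpu_up_write(&frob_lock);
 *	}
 *
 * While no writer is around, readers pay only a preempt-disabled per-cpu
 * increment/decrement; percpu_down_write() switches the rcu_sync out of
 * the idle state, pushes new readers onto the rw_sem slow path, and waits
 * for the per-cpu reader counts to drain before proceeding.
 */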