xref: /linux/include/asm-generic/qrwlock.h (revision f44ca087)
1c942fddfSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-or-later */
270af2f8aSWaiman Long /*
370af2f8aSWaiman Long  * Queue read/write lock
470af2f8aSWaiman Long  *
570af2f8aSWaiman Long  * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
670af2f8aSWaiman Long  *
770af2f8aSWaiman Long  * Authors: Waiman Long <waiman.long@hp.com>
870af2f8aSWaiman Long  */
970af2f8aSWaiman Long #ifndef __ASM_GENERIC_QRWLOCK_H
1070af2f8aSWaiman Long #define __ASM_GENERIC_QRWLOCK_H
1170af2f8aSWaiman Long 
1270af2f8aSWaiman Long #include <linux/atomic.h>
1370af2f8aSWaiman Long #include <asm/barrier.h>
1470af2f8aSWaiman Long #include <asm/processor.h>
1570af2f8aSWaiman Long 
1670af2f8aSWaiman Long #include <asm-generic/qrwlock_types.h>
1770af2f8aSWaiman Long 
/*
 * Writer states & reader shift and bias.
 *
 * Layout of the lock word (lock->cnts):
 *   bits 0-7 : writer lock byte (non-zero = a writer holds the lock;
 *              cleared as a byte store in queued_write_unlock())
 *   bit  8   : a writer is waiting for the lock
 *   bits 9.. : reader count, incremented/decremented in units of _QR_BIAS
 */
#define	_QW_WAITING	0x100		/* A writer is waiting	   */
#define	_QW_LOCKED	0x0ff		/* A writer holds the lock */
#define	_QW_WMASK	0x1ff		/* Writer mask (waiting | locked) */
#define	_QR_SHIFT	9		/* Reader count shift	   */
#define _QR_BIAS	(1U << _QR_SHIFT)
2670af2f8aSWaiman Long 
/*
 * External function declarations
 *
 * Contended slow paths, taken when the inline fast paths below fail;
 * implemented out of line (kernel/locking/qrwlock.c in the kernel tree).
 */
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);
3270af2f8aSWaiman Long 
/**
 * queued_read_trylock - try to acquire read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 *
 * Never blocks: fails immediately if a writer holds or is waiting
 * for the lock (any bit in _QW_WMASK set).
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (likely(!(cnts & _QW_WMASK))) {
		/*
		 * No writer active or queued: optimistically add the reader
		 * bias with acquire ordering, then re-check, since a writer
		 * may have raced in between the read and the add.
		 */
		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return 1;
		/* A writer slipped in; back out our reader count. */
		atomic_sub(_QR_BIAS, &lock->cnts);
	}
	return 0;
}
5170af2f8aSWaiman Long 
/**
 * queued_write_trylock - try to acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 *
 * Succeeds only when the lock word is completely idle (no readers,
 * no writer, no waiter), via a single 0 -> _QW_LOCKED transition.
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (unlikely(cnts))
		return 0;	/* readers and/or a writer present */

	/*
	 * cnts is known to be 0 here; try_cmpxchg fails (and updates
	 * cnts) if anyone else changed the lock word in the meantime.
	 */
	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
				_QW_LOCKED));
}
/**
 * queued_read_lock - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 *
 * Fast path: one atomic add with acquire ordering; falls back to the
 * out-of-line slowpath when a writer holds or is waiting for the lock.
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;		/* no writer: acquired on the fast path */

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock);
}
8370af2f8aSWaiman Long 
/**
 * queued_write_lock - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 *
 * Fast path: a single 0 -> _QW_LOCKED cmpxchg with acquire ordering,
 * which can only succeed on a completely idle lock; otherwise queue
 * up in the out-of-line slowpath.
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
	int cnts = 0;
	/* Optimize for the unfair lock case where the fair flag is 0. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
		return;

	queued_write_lock_slowpath(lock);
}
9770af2f8aSWaiman Long 
/**
 * queued_read_unlock - release read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count. Release ordering makes
	 * the critical section's accesses visible before the unlock.
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}
10970af2f8aSWaiman Long 
/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 *
 * Clears only the low writer-lock byte (lock->wlocked overlays bits
 * 0-7 of lock->cnts), leaving any _QW_WAITING bit and reader count
 * untouched. smp_store_release() provides the unlock ordering.
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
	smp_store_release(&lock->wlocked, 0);
}
11870af2f8aSWaiman Long 
/*
 * Remapping rwlock architecture specific functions to the corresponding
 * queue rwlock functions. Architectures including this header get the
 * generic queued implementation as their arch_* rwlock operations.
 */
#define arch_read_lock(l)	queued_read_lock(l)
#define arch_write_lock(l)	queued_write_lock(l)
#define arch_read_trylock(l)	queued_read_trylock(l)
#define arch_write_trylock(l)	queued_write_trylock(l)
#define arch_read_unlock(l)	queued_read_unlock(l)
#define arch_write_unlock(l)	queued_write_unlock(l)
12970af2f8aSWaiman Long 
13070af2f8aSWaiman Long #endif /* __ASM_GENERIC_QRWLOCK_H */
131