/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queue read/write lock
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QRWLOCK_H
#define __ASM_GENERIC_QRWLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#include <asm-generic/qrwlock_types.h>

/* Must be included from asm/spinlock.h after defining arch_spin_is_locked. */

/*
 * Writer states & reader shift and bias.
 */
#define	_QW_WAITING	0x100		/* A writer is waiting	   */
#define	_QW_LOCKED	0x0ff		/* A writer holds the lock */
#define	_QW_WMASK	0x1ff		/* Writer mask		   */
#define	_QR_SHIFT	9		/* Reader count shift	   */
#define _QR_BIAS	(1U << _QR_SHIFT)
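
/*
 * Layout of lock->cnts implied by the definitions above (a sketch; the
 * low byte is the "wlocked" byte cleared by queued_write_unlock() below):
 *
 *	bits  0-7 : writer locked byte (_QW_LOCKED)
 *	bit     8 : a writer is waiting (_QW_WAITING)
 *	bits  9-31: reader count, incremented in units of _QR_BIAS
 */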

/*
 * External function declarations
 */
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);

/**
 * queued_read_trylock - try to acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (likely(!(cnts & _QW_WMASK))) {
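		/*
		 * No writer is present: speculatively add the reader bias,
		 * then recheck in case a writer slipped in meanwhile.
		 */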
		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return 1;
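		/* A writer got in first; back out the reader bias. */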
		atomic_sub(_QR_BIAS, &lock->cnts);
	}
	return 0;
}
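
/*
 * A minimal usage sketch (hypothetical caller, shown for illustration):
 *
 *	if (queued_read_trylock(&l)) {
 *		... read-side critical section ...
 *		queued_read_unlock(&l);
 *	}
 */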

/**
 * queued_write_trylock - try to acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (unlikely(cnts))
		return 0;

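	/*
	 * Attempt to move the lock word from 0 to _QW_LOCKED in one step;
	 * this can only succeed while there is no reader and no writer.
	 */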
	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
				_QW_LOCKED));
}

/**
 * queued_read_lock - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
	int cnts;

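	/* Optimistically add the reader bias; the common case has no writer. */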
	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock);
}

/**
 * queued_write_lock - acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
	int cnts = 0;
	/* Optimize for the unfair lock case where the fair flag is 0. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
		return;

	queued_write_lock_slowpath(lock);
}

/**
 * queued_read_unlock - release read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}

/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
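	/*
	 * Clear only the writer-locked byte, with release semantics; the
	 * waiting bit and the reader count in the rest of the word are
	 * left untouched.
	 */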
	smp_store_release(&lock->wlocked, 0);
}

/**
 * queued_rwlock_is_contended - check if the lock is contended
 * @lock: Pointer to queue rwlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static inline int queued_rwlock_is_contended(struct qrwlock *lock)
{
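	/*
	 * The slowpaths serialize waiters on wait_lock, so a held
	 * wait_lock indicates that someone is spinning for the lock.
	 */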
	return arch_spin_is_locked(&lock->wait_lock);
}

/*
 * Remapping rwlock architecture specific functions to the corresponding
 * queue rwlock functions.
 */
#define arch_read_lock(l)		queued_read_lock(l)
#define arch_write_lock(l)		queued_write_lock(l)
#define arch_read_trylock(l)		queued_read_trylock(l)
#define arch_write_trylock(l)		queued_write_trylock(l)
#define arch_read_unlock(l)		queued_read_unlock(l)
#define arch_write_unlock(l)		queued_write_unlock(l)
#define arch_rwlock_is_contended(l)	queued_rwlock_is_contended(l)

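/*
 * A minimal sketch of how an architecture opts in (a hypothetical
 * arch "foo" is shown for illustration; per the note near the top of
 * this file, the include must come from asm/spinlock.h after
 * arch_spin_is_locked is defined):
 *
 *	// arch/foo/include/asm/spinlock.h
 *	#include <asm-generic/qrwlock.h>
 */
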
#endif /* __ASM_GENERIC_QRWLOCK_H */