xref: /linux/arch/arm64/include/asm/preempt.h (revision 1b2d3451)
139624469SWill Deacon /* SPDX-License-Identifier: GPL-2.0 */
239624469SWill Deacon #ifndef __ASM_PREEMPT_H
339624469SWill Deacon #define __ASM_PREEMPT_H
439624469SWill Deacon 
51b2d3451SMark Rutland #include <linux/jump_label.h>
639624469SWill Deacon #include <linux/thread_info.h>
739624469SWill Deacon 
/*
 * The 64-bit preempt_count word packs the 32-bit count in its low half
 * and the need_resched flag in bit 32.  The flag uses inverted logic:
 * when SET, no reschedule is pending.  PREEMPT_ENABLED is therefore
 * "count == 0 and no resched pending", so a 64-bit read of all-zeroes
 * means "preemptible and a reschedule is needed" (see
 * __preempt_count_dec_and_test()).
 */
#define PREEMPT_NEED_RESCHED	BIT(32)
#define PREEMPT_ENABLED	(PREEMPT_NEED_RESCHED)
1039624469SWill Deacon 
preempt_count(void)1139624469SWill Deacon static inline int preempt_count(void)
1239624469SWill Deacon {
1339624469SWill Deacon 	return READ_ONCE(current_thread_info()->preempt.count);
1439624469SWill Deacon }
1539624469SWill Deacon 
/* Set the count sub-field of the current task's preempt_count. */
static inline void preempt_count_set(u64 pc)
{
	/*
	 * Preserve existing value of PREEMPT_NEED_RESCHED: the store
	 * targets the 32-bit count sub-field only, so truncating the
	 * 64-bit pc leaves the word holding the need_resched flag
	 * untouched.
	 */
	WRITE_ONCE(current_thread_info()->preempt.count, pc);
}
2139624469SWill Deacon 
/*
 * Initialise a forked child's preempt state.  This assigns the full
 * 64-bit preempt_count field, so the need_resched flag word is
 * initialised along with the count.
 */
#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)
2539624469SWill Deacon 
/*
 * Initialise an idle task's preempt state to PREEMPT_DISABLED.
 * The cpu argument is unused here; it is part of the generic API.
 */
#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)
2939624469SWill Deacon 
/* Mark the current task as needing a reschedule. */
static inline void set_preempt_need_resched(void)
{
	/*
	 * Inverted logic: 0 means a reschedule IS needed.  This lets a
	 * single 64-bit read of preempt_count evaluate to zero exactly
	 * when the task is preemptible and a resched is pending.
	 */
	current_thread_info()->preempt.need_resched = 0;
}
3439624469SWill Deacon 
/* Clear the pending-reschedule indication for the current task. */
static inline void clear_preempt_need_resched(void)
{
	/* Inverted logic: 1 means no reschedule is pending. */
	current_thread_info()->preempt.need_resched = 1;
}
3939624469SWill Deacon 
/* Return true if the current task needs a reschedule. */
static inline bool test_preempt_need_resched(void)
{
	/* Inverted flag: a zero need_resched field means "resched needed". */
	return !current_thread_info()->preempt.need_resched;
}
4439624469SWill Deacon 
__preempt_count_add(int val)4539624469SWill Deacon static inline void __preempt_count_add(int val)
4639624469SWill Deacon {
4739624469SWill Deacon 	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
4839624469SWill Deacon 	pc += val;
4939624469SWill Deacon 	WRITE_ONCE(current_thread_info()->preempt.count, pc);
5039624469SWill Deacon }
5139624469SWill Deacon 
__preempt_count_sub(int val)5239624469SWill Deacon static inline void __preempt_count_sub(int val)
5339624469SWill Deacon {
5439624469SWill Deacon 	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
5539624469SWill Deacon 	pc -= val;
5639624469SWill Deacon 	WRITE_ONCE(current_thread_info()->preempt.count, pc);
5739624469SWill Deacon }
5839624469SWill Deacon 
/*
 * Decrement the preempt count and return true if the task is now
 * preemptible and a reschedule is pending.
 */
static inline bool __preempt_count_dec_and_test(void)
{
	struct thread_info *ti = current_thread_info();
	u64 pc = READ_ONCE(ti->preempt_count);

	/*
	 * Update only the count field, leaving need_resched unchanged:
	 * the store targets the 32-bit count sub-field, so the word
	 * holding the (inverted) need_resched flag is untouched even
	 * though pc was read as the full 64-bit value.
	 */
	WRITE_ONCE(ti->preempt.count, --pc);

	/*
	 * If we wrote back all zeroes, then we're preemptible and in
	 * need of a reschedule. Otherwise, we need to reload the
	 * preempt_count in case the need_resched flag was cleared by an
	 * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE
	 * pair.
	 */
	return !pc || !READ_ONCE(ti->preempt_count);
}
7639624469SWill Deacon 
/*
 * Return true if the current task should be rescheduled once the
 * preempt count drops to preempt_offset.
 */
static inline bool should_resched(int preempt_offset)
{
	/*
	 * The 64-bit read folds in the need_resched flag (bit 32), so
	 * the equality can only hold when count == preempt_offset AND
	 * the inverted need_resched flag is clear, i.e. a reschedule is
	 * actually pending.
	 */
	u64 pc = READ_ONCE(current_thread_info()->preempt_count);
	return pc == preempt_offset;
}
8239624469SWill Deacon 
#ifdef CONFIG_PREEMPTION

/* Out-of-line scheduling points invoked when a preemption is due. */
void preempt_schedule(void);
void preempt_schedule_notrace(void);

#ifdef CONFIG_PREEMPT_DYNAMIC

/*
 * With runtime-selectable preemption, route the scheduling points
 * through the dynamic_* wrappers (static-key controlled; see
 * sk_dynamic_irqentry_exit_cond_resched).
 */
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_preempt_schedule(void);
#define __preempt_schedule()		dynamic_preempt_schedule()
void dynamic_preempt_schedule_notrace(void);
#define __preempt_schedule_notrace()	dynamic_preempt_schedule_notrace()

#else /* CONFIG_PREEMPT_DYNAMIC */

/* Static preemption model: call the scheduler entry points directly. */
#define __preempt_schedule()		preempt_schedule()
#define __preempt_schedule_notrace()	preempt_schedule_notrace()

#endif /* CONFIG_PREEMPT_DYNAMIC */
#endif /* CONFIG_PREEMPTION */
10339624469SWill Deacon 
10439624469SWill Deacon #endif /* __ASM_PREEMPT_H */
105