xref: /linux/arch/s390/include/asm/preempt.h (revision c9c26068)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2c360192bSMartin Schwidefsky #ifndef __ASM_PREEMPT_H
3c360192bSMartin Schwidefsky #define __ASM_PREEMPT_H
4c360192bSMartin Schwidefsky 
5c360192bSMartin Schwidefsky #include <asm/current.h>
6c360192bSMartin Schwidefsky #include <linux/thread_info.h>
7c360192bSMartin Schwidefsky #include <asm/atomic_ops.h>
8c360192bSMartin Schwidefsky 
9c360192bSMartin Schwidefsky #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
10c360192bSMartin Schwidefsky 
1108861d33SWill Deacon /* We use the MSB mostly because its available */
1208861d33SWill Deacon #define PREEMPT_NEED_RESCHED	0x80000000
13c360192bSMartin Schwidefsky #define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
14c360192bSMartin Schwidefsky 
/*
 * Return the current preempt count, masking off the PREEMPT_NEED_RESCHED
 * bit that is folded into the MSB of the lowcore preempt_count word.
 */
static __always_inline int preempt_count(void)
{
	return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
}
19c360192bSMartin Schwidefsky 
/*
 * Set the preempt count to @pc while preserving the current state of
 * the folded PREEMPT_NEED_RESCHED bit.  A compare-and-swap loop is used
 * so that a concurrent atomic set/clear of the need-resched bit (from
 * set_preempt_need_resched()/clear_preempt_need_resched()) is not lost
 * between the read and the store.
 */
static __always_inline void preempt_count_set(int pc)
{
	int old, new;

	do {
		old = READ_ONCE(S390_lowcore.preempt_count);
		/* keep the flag bit from @old, take the count from @pc */
		new = (old & PREEMPT_NEED_RESCHED) |
			(pc & ~PREEMPT_NEED_RESCHED);
	} while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
				  old, new) != old);
}
31c360192bSMartin Schwidefsky 
/*
 * Request a reschedule.  The PREEMPT_NEED_RESCHED bit is stored
 * *inverted* (a cleared MSB means "resched needed"), so setting the
 * flag atomically clears the bit.  The inversion is what lets
 * should_resched()/__preempt_count_dec_and_test() check count and flag
 * with a single compare.
 */
static __always_inline void set_preempt_need_resched(void)
{
	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}
36c360192bSMartin Schwidefsky 
/*
 * Drop a pending reschedule request.  Because the flag is stored
 * inverted, clearing it means atomically setting the MSB again.
 */
static __always_inline void clear_preempt_need_resched(void)
{
	__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}
41c360192bSMartin Schwidefsky 
/*
 * True if a reschedule has been requested.  Note the negation: the
 * flag bit is stored inverted, so a *cleared* PREEMPT_NEED_RESCHED bit
 * means a reschedule is needed.
 */
static __always_inline bool test_preempt_need_resched(void)
{
	return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
}
46c360192bSMartin Schwidefsky 
/*
 * Add @val to the preempt count.  Compile-time constants that fit in a
 * signed byte (-128..127, checked below) use __atomic_add_const(), the
 * immediate form of the interlocked add; everything else falls back to
 * the plain atomic add.
 */
static __always_inline void __preempt_count_add(int val)
{
	/*
	 * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
	 * enabled, gcc 12 fails to handle __builtin_constant_p().
	 */
	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
		if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
			__atomic_add_const(val, &S390_lowcore.preempt_count);
			return;
		}
	}
	__atomic_add(val, &S390_lowcore.preempt_count);
}
61c360192bSMartin Schwidefsky 
/* Subtract @val from the preempt count (negated add, same fast paths). */
static __always_inline void __preempt_count_sub(int val)
{
	__preempt_count_add(-val);
}
66c360192bSMartin Schwidefsky 
/*
 * Decrement the preempt count and report whether preemption is now due.
 * __atomic_add() returns the pre-add value, so the old word equals 1
 * only when the count drops to zero *and* the inverted
 * PREEMPT_NEED_RESCHED bit is cleared (i.e. a resched was requested) —
 * both conditions checked with a single atomic op and one compare.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
	return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
}
71c360192bSMartin Schwidefsky 
/*
 * True when the preempt count equals @preempt_offset and a reschedule
 * is pending.  Thanks to the inverted flag bit, the raw lowcore word
 * equals @preempt_offset exactly in that case, so a single equality
 * compare covers both conditions.
 */
static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
			preempt_offset);
}
77c360192bSMartin Schwidefsky 
78c360192bSMartin Schwidefsky #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
79c360192bSMartin Schwidefsky 
80c360192bSMartin Schwidefsky #define PREEMPT_ENABLED	(0)
81c360192bSMartin Schwidefsky 
/*
 * Pre-z196 fallback: no need-resched bit is folded into the word, so
 * the raw lowcore count is returned as-is.
 */
static __always_inline int preempt_count(void)
{
	return READ_ONCE(S390_lowcore.preempt_count);
}
86c360192bSMartin Schwidefsky 
/* Plain store suffices here: there is no folded flag bit to preserve. */
static __always_inline void preempt_count_set(int pc)
{
	S390_lowcore.preempt_count = pc;
}
91c360192bSMartin Schwidefsky 
/*
 * No-op: without the folded flag the resched request lives only in the
 * thread-info flag queried via tif_need_resched().
 */
static __always_inline void set_preempt_need_resched(void)
{
}
95c360192bSMartin Schwidefsky 
/* No-op for the same reason as set_preempt_need_resched(). */
static __always_inline void clear_preempt_need_resched(void)
{
}
99c360192bSMartin Schwidefsky 
/*
 * Always false: the count word carries no flag bit, so there is
 * nothing to test here.
 */
static __always_inline bool test_preempt_need_resched(void)
{
	return false;
}
104c360192bSMartin Schwidefsky 
/*
 * Non-atomic read-modify-write of the lowcore count.
 * NOTE(review): presumably safe because pre-z196 machines lack the
 * interlocked-add instructions and the count is only updated by the
 * local CPU — confirm against the z196 variant above.
 */
static __always_inline void __preempt_count_add(int val)
{
	S390_lowcore.preempt_count += val;
}
109c360192bSMartin Schwidefsky 
/* Non-atomic counterpart of __preempt_count_add() for subtraction. */
static __always_inline void __preempt_count_sub(int val)
{
	S390_lowcore.preempt_count -= val;
}
114c360192bSMartin Schwidefsky 
/*
 * Decrement the count; preemption is due only if it reached zero *and*
 * the thread-info need-resched flag is set.  Unlike the z196 variant,
 * the flag must be checked explicitly since it is not folded into the
 * count word.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
	return !--S390_lowcore.preempt_count && tif_need_resched();
}
119c360192bSMartin Schwidefsky 
/*
 * Preemption is due when the count sits exactly at @preempt_offset and
 * the thread-info flag requests a reschedule; both conditions are
 * checked separately here (no folded flag bit on pre-z196 machines).
 */
static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
}
125c360192bSMartin Schwidefsky 
126c360192bSMartin Schwidefsky #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
127c360192bSMartin Schwidefsky 
1286a942f57SValentin Schneider #define init_task_preempt_count(p)	do { } while (0)
1296a942f57SValentin Schneider /* Deferred to CPU bringup time */
1306a942f57SValentin Schneider #define init_idle_preempt_count(p, cpu)	do { } while (0)
1316a942f57SValentin Schneider 
132fa686453SThomas Gleixner #ifdef CONFIG_PREEMPTION
13339589adaSSven Schnelle extern void preempt_schedule(void);
134c360192bSMartin Schwidefsky #define __preempt_schedule() preempt_schedule()
13539589adaSSven Schnelle extern void preempt_schedule_notrace(void);
136c360192bSMartin Schwidefsky #define __preempt_schedule_notrace() preempt_schedule_notrace()
137fa686453SThomas Gleixner #endif /* CONFIG_PREEMPTION */
138c360192bSMartin Schwidefsky 
139c360192bSMartin Schwidefsky #endif /* __ASM_PREEMPT_H */
140