xref: /linux/arch/powerpc/include/asm/delay.h (revision 0be3ff0c)
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_DELAY_H
#define _ASM_POWERPC_DELAY_H
#ifdef __KERNEL__

#include <linux/processor.h>
#include <asm/time.h>

/*
 * Copyright 1996, Paul Mackerras.
 * Copyright (C) 2009 Freescale Semiconductor, Inc. All rights reserved.
 *
 * PPC64 Support added by Dave Engebretsen, Todd Inglett, Mike Corrigan,
 * Anton Blanchard.
 */

extern void __delay(unsigned long loops);
extern void udelay(unsigned long usecs);

/*
 * On shared processor machines the generic implementation of mdelay can
 * result in large errors. While each iteration of the loop inside mdelay
 * is supposed to take 1ms, the hypervisor could sleep our partition for
 * longer (e.g. 10ms). With the right timing these errors can add up.
 *
 * Since there is no 32bit overflow issue on 64bit kernels, just call
 * udelay directly.
 */
#ifdef CONFIG_PPC64
#define mdelay(n)	udelay((n) * 1000)
#endif

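/*
 * Illustrative sketch only; the "regs" pointer and its reset register are
 * hypothetical, not part of this header.  A driver that must hold a device
 * in reset for roughly 10ms could write:
 *
 *	out_be32(&regs->reset, 1);
 *	mdelay(10);
 *	out_be32(&regs->reset, 0);
 *
 * On PPC64 the mdelay(10) above is a single udelay(10000), so a hypervisor
 * preemption cannot add error on each of ten separate 1ms iterations.
 */
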
/**
 * spin_event_timeout - spin until a condition becomes true or a timeout elapses
 * @condition: a C expression to evaluate
 * @timeout: timeout, in microseconds
 * @delay: the number of microseconds to delay between each evaluation of
 *         @condition
 *
 * The process spins until the condition evaluates to true (non-zero) or the
 * timeout elapses.  The return value of this macro is the value of
 * @condition when the loop terminates.  This lets you determine why the
 * loop terminated: if the return value is zero, a timeout has occurred.
 *
 * The primary purpose of this macro is to poll a hardware register until a
 * status bit changes (see the usage sketch below the macro).  The timeout
 * ensures that the loop still terminates even if the bit never changes, and
 * the delay is for devices that need a pause between successive reads.
 *
 * gcc will optimize out the if-statement if @delay is a constant.
 */
#define spin_event_timeout(condition, timeout, delay)                          \
({                                                                             \
	typeof(condition) __ret;                                               \
	unsigned long __loops = tb_ticks_per_usec * timeout;                   \
	unsigned long __start = mftb();                                        \
                                                                               \
	if (delay) {                                                           \
		while (!(__ret = (condition)) &&                               \
				(tb_ticks_since(__start) <= __loops))          \
			udelay(delay);                                         \
	} else {                                                               \
		spin_begin();                                                  \
		while (!(__ret = (condition)) &&                               \
				(tb_ticks_since(__start) <= __loops))          \
			spin_cpu_relax();                                      \
		spin_end();                                                    \
	}                                                                      \
	if (!__ret)                                                            \
		__ret = (condition);                                           \
	__ret;                                                                 \
})
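
/*
 * Usage sketch for spin_event_timeout(); purely illustrative.  The "regs"
 * pointer and the FOO_STAT_DONE bit are hypothetical and not part of this
 * header.  Poll a (made-up) status register for up to 1000us, waiting 10us
 * between reads; the macro returns the last value of the condition, so a
 * zero result means the timeout expired:
 *
 *	u32 stat;
 *
 *	if (!spin_event_timeout((stat = in_be32(&regs->status)) & FOO_STAT_DONE,
 *				1000, 10))
 *		pr_warn("device never signalled completion\n");
 */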

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DELAY_H */