xref: /linux/arch/arm/include/asm/arch_timer.h (revision 44f57d78)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __ASMARM_ARCH_TIMER_H
3 #define __ASMARM_ARCH_TIMER_H
4 
5 #include <asm/barrier.h>
6 #include <asm/errno.h>
7 #include <linux/clocksource.h>
8 #include <linux/init.h>
9 #include <linux/types.h>
10 
11 #include <clocksource/arm_arch_timer.h>
12 
13 #ifdef CONFIG_ARM_ARCH_TIMER
14 /* 32bit ARM doesn't know anything about timer errata... */
15 #define has_erratum_handler(h)		(false)
16 #define erratum_handler(h)		(arch_timer_##h)
17 
18 int arch_timer_arch_init(void);
19 
20 /*
21  * These register accessors are marked inline so the compiler can
22  * nicely work out which register we want, and chuck away the rest of
23  * the code. At least it does so with a recent GCC (4.6.3).
24  */
25 static __always_inline
26 void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
27 {
28 	if (access == ARCH_TIMER_PHYS_ACCESS) {
29 		switch (reg) {
30 		case ARCH_TIMER_REG_CTRL:
31 			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
32 			break;
33 		case ARCH_TIMER_REG_TVAL:
34 			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
35 			break;
36 		}
37 	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
38 		switch (reg) {
39 		case ARCH_TIMER_REG_CTRL:
40 			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
41 			break;
42 		case ARCH_TIMER_REG_TVAL:
43 			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
44 			break;
45 		}
46 	}
47 
48 	isb();
49 }
50 
51 static __always_inline
52 u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
53 {
54 	u32 val = 0;
55 
56 	if (access == ARCH_TIMER_PHYS_ACCESS) {
57 		switch (reg) {
58 		case ARCH_TIMER_REG_CTRL:
59 			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
60 			break;
61 		case ARCH_TIMER_REG_TVAL:
62 			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
63 			break;
64 		}
65 	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
66 		switch (reg) {
67 		case ARCH_TIMER_REG_CTRL:
68 			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
69 			break;
70 		case ARCH_TIMER_REG_TVAL:
71 			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
72 			break;
73 		}
74 	}
75 
76 	return val;
77 }
78 
79 static inline u32 arch_timer_get_cntfrq(void)
80 {
81 	u32 val;
82 	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
83 	return val;
84 }
85 
86 static inline u64 __arch_counter_get_cntpct(void)
87 {
88 	u64 cval;
89 
90 	isb();
91 	asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
92 	return cval;
93 }
94 
/*
 * "Stable" read of the physical counter. 32-bit ARM has no timer
 * errata workarounds (see has_erratum_handler above), so this is a
 * plain forward to __arch_counter_get_cntpct().
 */
static inline u64 __arch_counter_get_cntpct_stable(void)
{
	return __arch_counter_get_cntpct();
}
99 
100 static inline u64 __arch_counter_get_cntvct(void)
101 {
102 	u64 cval;
103 
104 	isb();
105 	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
106 	return cval;
107 }
108 
/*
 * "Stable" read of the virtual counter. 32-bit ARM has no timer
 * errata workarounds (see has_erratum_handler above), so this is a
 * plain forward to __arch_counter_get_cntvct().
 */
static inline u64 __arch_counter_get_cntvct_stable(void)
{
	return __arch_counter_get_cntvct();
}
113 
114 static inline u32 arch_timer_get_cntkctl(void)
115 {
116 	u32 cntkctl;
117 	asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
118 	return cntkctl;
119 }
120 
/* Write CNTKCTL; the isb() ensures the write takes effect before return. */
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
	asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
	isb();
}
126 
127 #endif
128 
129 #endif
130