/* $OpenBSD: cpu.h,v 1.18 2020/06/05 23:16:24 naddy Exp $ */
/*
 * Copyright (c) 2016 Dale Rahn <drahn@dalerahn.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _MACHINE_CPU_H_
#define _MACHINE_CPU_H_

/*
 * User-visible definitions
 */

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_COMPATIBLE		1	/* compatible property */
#define	CPU_MAXID		2	/* number of valid machdep ids */

#define	CTL_MACHDEP_NAMES { \
	{ 0, 0 }, \
	{ "compatible", CTLTYPE_STRING }, \
}
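
/*
 * Illustrative sketch only: from userland, machdep.compatible can be
 * read with sysctl(2) using the ids above, roughly like
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_COMPATIBLE };
 *	char buf[128];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */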

#ifdef _KERNEL

/*
 * Kernel-only definitions
 */

#include <machine/intr.h>
#include <machine/frame.h>
#include <machine/armreg.h>

/* All the CLKF_* macros take a struct clockframe * as an argument. */

#define clockframe trapframe
/*
 * CLKF_USERMODE: Return TRUE/FALSE (1/0) depending on whether the
 * frame came from user mode (EL0) or not.  The top bit of the saved
 * ELR distinguishes kernel addresses from user addresses.
 */
#define CLKF_USERMODE(frame)	((frame->tf_elr & (1ul << 63)) == 0)

/*
 * CLKF_INTR: True if we took the interrupt from inside another
 * interrupt handler.
 */
#define CLKF_INTR(frame)	(curcpu()->ci_idepth > 1)

/*
 * CLKF_PC: Extract the program counter from a clockframe
 */
#define CLKF_PC(frame)		(frame->tf_elr)

/*
 * PROC_PC: Find out the program counter for the given process.
 */
#define PROC_PC(p)	((p)->p_addr->u_pcb.pcb_tf->tf_elr)
#define PROC_STACK(p)	((p)->p_addr->u_pcb.pcb_tf->tf_sp)

/* The address of the vector page. */
extern vaddr_t vector_page;
void	arm32_vector_init(vaddr_t, int);

/*
 * Per-CPU information.  One cpu_info structure is set up per CPU; on
 * MULTIPROCESSOR kernels they are linked together via ci_next.
 */

#include <sys/device.h>
#include <sys/sched.h>
#include <sys/srp.h>

struct cpu_info {
	struct device		*ci_dev; /* Device corresponding to this CPU */
	struct cpu_info		*ci_next;
	struct schedstate_percpu ci_schedstate; /* scheduler state */

	u_int32_t		ci_cpuid;
	uint64_t		ci_mpidr;
	u_int			ci_acpi_proc_id;
	int			ci_node;
	struct cpu_info		*ci_self;

	struct proc		*ci_curproc;
	struct pmap		*ci_curpm;
	struct proc		*ci_fpuproc;
	u_int32_t		ci_randseed;

	struct pcb		*ci_curpcb;
	struct pcb		*ci_idle_pcb;

	u_int32_t		ci_ctrl; /* The CPU control register */

	uint32_t		ci_cpl;
	uint32_t		ci_ipending;
	uint32_t		ci_idepth;
#ifdef DIAGNOSTIC
	int			ci_mutex_level;
#endif
	int			ci_want_resched;

	void			(*ci_flush_bp)(void);

	struct opp_table	*ci_opp_table;
	volatile int		ci_opp_idx;
	volatile int		ci_opp_max;
	uint32_t		ci_cpu_supply;

#ifdef MULTIPROCESSOR
	struct srp_hazard	ci_srp_hazards[SRP_HAZARD_NUM];
	volatile int		ci_flags;
	uint64_t		ci_ttbr1;
	vaddr_t			ci_el1_stkend;

	volatile int		ci_ddb_paused;
#define CI_DDB_RUNNING		0
#define CI_DDB_SHOULDSTOP	1
#define CI_DDB_STOPPED		2
#define CI_DDB_ENTERDDB		3
#define CI_DDB_INDDB		4

#endif

#ifdef GPROF
	struct gmonparam	*ci_gmon;
#endif
};

#define CPUF_PRIMARY 		(1<<0)
#define CPUF_AP	 		(1<<1)
#define CPUF_IDENTIFY		(1<<2)
#define CPUF_IDENTIFIED		(1<<3)
#define CPUF_PRESENT		(1<<4)
#define CPUF_GO			(1<<5)
#define CPUF_RUNNING		(1<<6)

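/*
 * curcpu() reads the per-CPU cpu_info pointer back out of TPIDR_EL1;
 * the MD startup code is expected to have loaded each CPU's register
 * with the address of its own cpu_info structure.
 */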
static inline struct cpu_info *
curcpu(void)
{
	struct cpu_info *__ci = NULL;
	__asm __volatile("mrs %0, tpidr_el1" : "=r" (__ci));
	return (__ci);
}

extern struct cpu_info cpu_info_primary;
extern struct cpu_info *cpu_info_list;

#ifndef MULTIPROCESSOR
#define cpu_number()	0
#define CPU_IS_PRIMARY(ci)	1
#define CPU_INFO_ITERATOR	int
#define CPU_INFO_FOREACH(cii, ci) \
	for (cii = 0, ci = curcpu(); ci != NULL; ci = NULL)
#define CPU_INFO_UNIT(ci)	0
#define MAXCPUS	1
#define cpu_unidle(ci)
#else
#define cpu_number()		(curcpu()->ci_cpuid)
#define CPU_IS_PRIMARY(ci)	((ci) == &cpu_info_primary)
#define CPU_INFO_ITERATOR		int
#define CPU_INFO_FOREACH(cii, ci)	for (cii = 0, ci = cpu_info_list; \
					    ci != NULL; ci = ci->ci_next)
#define CPU_INFO_UNIT(ci)	((ci)->ci_dev ? (ci)->ci_dev->dv_unit : 0)
#define MAXCPUS	32

extern struct cpu_info *cpu_info[MAXCPUS];

void cpu_boot_secondary_processors(void);
#endif /* !MULTIPROCESSOR */
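
/*
 * Illustrative sketch only: iterating over all attached CPUs with the
 * macros above looks roughly like
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	CPU_INFO_FOREACH(cii, ci)
 *		printf("cpu%d\n", CPU_INFO_UNIT(ci));
 */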

#define CPU_BUSY_CYCLE()	__asm volatile("yield" : : : "memory")

#define curpcb		curcpu()->ci_curpcb

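/*
 * cpu_rnd_messybits() mixes the virtual counter (CNTVCT_EL0) with its
 * bit-reversed value, so both the fast- and slow-moving counter bits
 * influence the low bits of the result.
 */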
static inline unsigned int
cpu_rnd_messybits(void)
{
	uint64_t val, rval;

	__asm volatile("mrs %0, CNTVCT_EL0; rbit %1, %0;"
	    : "=r" (val), "=r" (rval));

	return (val ^ rval);
}

/*
 * Scheduling glue
 */
#define aston(p)        ((p)->p_md.md_astpending = 1)
#define	setsoftast()	aston(curcpu()->ci_curproc)

/*
 * Notify the given process (p) that it has a signal pending, to be
 * processed as soon as possible.
 */

#ifdef MULTIPROCESSOR
void cpu_unidle(struct cpu_info *ci);
#define signotify(p)            (aston(p), cpu_unidle((p)->p_cpu))
void cpu_kick(struct cpu_info *);
#else
#define cpu_kick(ci)
#define cpu_unidle(ci)
#define signotify(p)            setsoftast()
#endif

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
void need_resched(struct cpu_info *);
#define clear_resched(ci) 	((ci)->ci_want_resched = 0)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  Request an AST to send us through trap(),
 * marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	aston(p)

// asm code to start new kernel contexts.
void	proc_trampoline(void);
void	child_trampoline(void);

/*
 * Random cruft
 */
void	dumpconf(void);

// cpuswitch.S
struct pcb;
void	savectx		(struct pcb *pcb);

// machdep.h
void bootsync		(int);

// fault.c
int badaddr_read	(void *, size_t, void *);

// syscall.c
void svc_handler	(trapframe_t *);

/* machine_machdep.c */
void board_startup(void);

// functions to manipulate interrupt state
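// Illustrative note: in the PSTATE.DAIF accessors below, the "#2"
// immediate used with daifset/daifclr selects the I (IRQ mask) bit,
// so these helpers mask and unmask ordinary interrupts only.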
static __inline uint32_t
get_daif()
{
	uint32_t daif;

	__asm volatile ("mrs %x0, daif": "=r"(daif));
	return daif;
}

static __inline void
restore_daif(uint32_t daif)
{
	__asm volatile ("msr daif, %x0":: "r"(daif));
}

static __inline void
enable_irq_daif()
{
	__asm volatile ("msr daifclr, #2");
}

static __inline void
disable_irq_daif()
{
	__asm volatile ("msr daifset, #2");
}

static __inline uint32_t
disable_irq_daif_ret()
{
	uint32_t daif;
	__asm volatile ("mrs %x0, daif": "=r"(daif));
	__asm volatile ("msr daifset, #2");
	return daif;
}

#define get_interrupts(mask)						\
	(get_daif())

#define disable_interrupts()						\
	disable_irq_daif_ret()

#define enable_interrupts()						\
	enable_irq_daif()

#define restore_interrupts(old_daif)					\
	restore_daif(old_daif)

static inline void
intr_enable(void)
{
	enable_irq_daif();
}

static inline u_long
intr_disable(void)
{
	return disable_irq_daif_ret();
}

static inline void
intr_restore(u_long daif)
{
	restore_daif(daif);
}
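
/*
 * Illustrative sketch only: a short critical section with interrupts
 * masked typically uses the wrappers above like
 *
 *	u_long daif;
 *
 *	daif = intr_disable();
 *	... touch state that must not race an interrupt handler ...
 *	intr_restore(daif);
 */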

void	cpu_startclock(void);

void	delay (unsigned);
#define	DELAY(x)	delay(x)

#endif /* _KERNEL */

#ifdef MULTIPROCESSOR
#include <sys/mplock.h>
#endif /* MULTIPROCESSOR */

#endif /* !_MACHINE_CPU_H_ */