xref: /freebsd/sys/arm64/arm64/machdep.c (revision e17f5b1d)
1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include "opt_acpi.h"
29 #include "opt_platform.h"
30 #include "opt_ddb.h"
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/buf.h>
38 #include <sys/bus.h>
39 #include <sys/cons.h>
40 #include <sys/cpu.h>
41 #include <sys/csan.h>
42 #include <sys/devmap.h>
43 #include <sys/efi.h>
44 #include <sys/exec.h>
45 #include <sys/imgact.h>
46 #include <sys/kdb.h>
47 #include <sys/kernel.h>
48 #include <sys/ktr.h>
49 #include <sys/limits.h>
50 #include <sys/linker.h>
51 #include <sys/msgbuf.h>
52 #include <sys/pcpu.h>
53 #include <sys/physmem.h>
54 #include <sys/proc.h>
55 #include <sys/ptrace.h>
56 #include <sys/reboot.h>
57 #include <sys/rwlock.h>
58 #include <sys/sched.h>
59 #include <sys/signalvar.h>
60 #include <sys/syscallsubr.h>
61 #include <sys/sysent.h>
62 #include <sys/sysproto.h>
63 #include <sys/ucontext.h>
64 #include <sys/vdso.h>
65 #include <sys/vmmeter.h>
66 
67 #include <vm/vm.h>
68 #include <vm/vm_param.h>
69 #include <vm/vm_kern.h>
70 #include <vm/vm_object.h>
71 #include <vm/vm_page.h>
72 #include <vm/vm_phys.h>
73 #include <vm/pmap.h>
74 #include <vm/vm_map.h>
75 #include <vm/vm_pager.h>
76 
77 #include <machine/armreg.h>
78 #include <machine/cpu.h>
79 #include <machine/debug_monitor.h>
80 #include <machine/kdb.h>
81 #include <machine/machdep.h>
82 #include <machine/metadata.h>
83 #include <machine/md_var.h>
84 #include <machine/pcb.h>
85 #include <machine/reg.h>
86 #include <machine/undefined.h>
87 #include <machine/vmparam.h>
88 
89 #ifdef VFP
90 #include <machine/vfp.h>
91 #endif
92 
93 #ifdef DEV_ACPI
94 #include <contrib/dev/acpica/include/acpi.h>
95 #include <machine/acpica_machdep.h>
96 #endif
97 
98 #ifdef FDT
99 #include <dev/fdt/fdt_common.h>
100 #include <dev/ofw/openfirm.h>
101 #endif
102 
103 static void get_fpcontext(struct thread *td, mcontext_t *mcp);
104 static void set_fpcontext(struct thread *td, mcontext_t *mcp);
105 
106 enum arm64_bus arm64_bus_method = ARM64_BUS_NONE;
107 
108 struct pcpu __pcpu[MAXCPU];
109 
110 static struct trapframe proc0_tf;
111 
112 int early_boot = 1;
113 int cold = 1;
114 static int boot_el;
115 
116 struct kva_md_info kmi;
117 
118 int64_t dczva_line_size;	/* The size of cache line the dc zva zeroes */
119 int has_pan;
120 
121 /*
122  * Physical address of the EFI System Table. Stashed from the metadata hints
123  * passed into the kernel and used by the EFI code to call runtime services.
124  */
125 vm_paddr_t efi_systbl_phys;
126 static struct efi_map_header *efihdr;
127 
128 /* pagezero_* implementations are provided in support.S */
129 void pagezero_simple(void *);
130 void pagezero_cache(void *);
131 
132 /* pagezero_simple is default pagezero */
133 void (*pagezero)(void *p) = pagezero_simple;
134 
135 static void
136 pan_setup(void)
137 {
138 	uint64_t id_aa64mfr1;
139 
140 	id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
141 	if (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
142 		has_pan = 1;
143 }
144 
/*
 * Enable PAN on the current CPU if pan_setup() found support for it.
 * Called per-CPU during boot.
 */
void
pan_enable(void)
{

	/*
	 * The LLVM integrated assembler doesn't understand the PAN
	 * PSTATE field. Because of this we need to manually create
	 * the instruction in an asm block. This is equivalent to:
	 * msr pan, #1
	 *
	 * This sets the PAN bit, stopping the kernel from accessing
	 * memory when userspace can also access it unless the kernel
	 * uses the userspace load/store instructions.
	 */
	if (has_pan) {
		/* Clear SPAN so exceptions taken to EL1 keep PAN set. */
		WRITE_SPECIALREG(sctlr_el1,
		    READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
		__asm __volatile(".inst 0xd500409f | (0x1 << 8)");
	}
}
165 
166 bool
167 has_hyp(void)
168 {
169 
170 	return (boot_el == 2);
171 }
172 
173 static void
174 cpu_startup(void *dummy)
175 {
176 	vm_paddr_t size;
177 	int i;
178 
179 	printf("real memory  = %ju (%ju MB)\n", ptoa((uintmax_t)realmem),
180 	    ptoa((uintmax_t)realmem) / 1024 / 1024);
181 
182 	if (bootverbose) {
183 		printf("Physical memory chunk(s):\n");
184 		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
185 			size = phys_avail[i + 1] - phys_avail[i];
186 			printf("%#016jx - %#016jx, %ju bytes (%ju pages)\n",
187 			    (uintmax_t)phys_avail[i],
188 			    (uintmax_t)phys_avail[i + 1] - 1,
189 			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
190 		}
191 	}
192 
193 	printf("avail memory = %ju (%ju MB)\n",
194 	    ptoa((uintmax_t)vm_free_count()),
195 	    ptoa((uintmax_t)vm_free_count()) / 1024 / 1024);
196 
197 	undef_init();
198 	install_cpu_errata();
199 
200 	vm_ksubmap_init(&kmi);
201 	bufinit();
202 	vm_pager_bufferinit();
203 }
204 
205 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
206 
/* Re-resolve ELF ifuncs once CPU feature detection has completed. */
static void
late_ifunc_resolve(void *dummy __unused)
{
	link_elf_late_ireloc();
}
SYSINIT(late_ifunc_resolve, SI_SUB_CPU, SI_ORDER_ANY, late_ifunc_resolve, NULL);
213 
/*
 * Wake a CPU from cpu_idle().  Nothing extra is required here: the
 * interrupt that triggered the wakeup already terminates WFI.
 */
int
cpu_idle_wakeup(int cpu)
{
	return (0);
}
220 
221 int
222 fill_regs(struct thread *td, struct reg *regs)
223 {
224 	struct trapframe *frame;
225 
226 	frame = td->td_frame;
227 	regs->sp = frame->tf_sp;
228 	regs->lr = frame->tf_lr;
229 	regs->elr = frame->tf_elr;
230 	regs->spsr = frame->tf_spsr;
231 
232 	memcpy(regs->x, frame->tf_x, sizeof(regs->x));
233 
234 #ifdef COMPAT_FREEBSD32
235 	/*
236 	 * We may be called here for a 32bits process, if we're using a
237 	 * 64bits debugger. If so, put PC and SPSR where it expects it.
238 	 */
239 	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
240 		regs->x[15] = frame->tf_elr;
241 		regs->x[16] = frame->tf_spsr;
242 	}
243 #endif
244 	return (0);
245 }
246 
247 int
248 set_regs(struct thread *td, struct reg *regs)
249 {
250 	struct trapframe *frame;
251 
252 	frame = td->td_frame;
253 	frame->tf_sp = regs->sp;
254 	frame->tf_lr = regs->lr;
255 	frame->tf_elr = regs->elr;
256 	frame->tf_spsr &= ~PSR_FLAGS;
257 	frame->tf_spsr |= regs->spsr & PSR_FLAGS;
258 
259 	memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));
260 
261 #ifdef COMPAT_FREEBSD32
262 	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
263 		/*
264 		 * We may be called for a 32bits process if we're using
265 		 * a 64bits debugger. If so, get PC and SPSR from where
266 		 * it put it.
267 		 */
268 		frame->tf_elr = regs->x[15];
269 		frame->tf_spsr = regs->x[16] & PSR_FLAGS;
270 	}
271 #endif
272 	return (0);
273 }
274 
/*
 * Copy the thread's FP/SIMD register state into *regs for ptrace.
 * Without VFP compiled in, or if the thread never used the FPU,
 * the structure is zeroed.  Always returns 0.
 */
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		if (td == curthread)
			vfp_save_state(td, pcb);

		KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
		    ("Called fill_fpregs while the kernel is using the VFP"));
		memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs,
		    sizeof(regs->fp_q));
		regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
		regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
	} else
#endif
		memset(regs, 0, sizeof(*regs));
	return (0);
}
301 
/*
 * Install debugger-supplied FP/SIMD register state; counterpart of
 * fill_fpregs().  A no-op when VFP support is not compiled in.
 */
int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *p = td->td_pcb;

	KASSERT(p->pcb_fpusaved == &p->pcb_fpustate,
	    ("Called set_fpregs while the kernel is using the VFP"));
	memcpy(p->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q));
	p->pcb_fpustate.vfp_fpcr = regs->fp_cr;
	p->pcb_fpustate.vfp_fpsr = regs->fp_sr;
#endif
	return (0);
}
317 
/*
 * Report the hardware breakpoint state to a debugger.  db_info packs
 * the debug architecture version (bits 15:8) and breakpoint count
 * (bits 7:0).  Breakpoint registers are only reported when the debug
 * monitor is enabled for this thread.
 */
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{
	struct debug_monitor_state *monitor;
	int count, i;
	uint8_t debug_ver, nbkpts;

	memset(regs, 0, sizeof(*regs));

	extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_DebugVer_SHIFT,
	    &debug_ver);
	extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_BRPs_SHIFT,
	    &nbkpts);

	/*
	 * The BRPs field contains the number of breakpoints - 1. Armv8-A
	 * allows the hardware to provide 2-16 breakpoints so this won't
	 * overflow an 8 bit value.
	 */
	count = nbkpts + 1;

	regs->db_info = debug_ver;
	regs->db_info <<= 8;
	regs->db_info |= count;

	monitor = &td->td_pcb->pcb_dbg_regs;
	if ((monitor->dbg_flags & DBGMON_ENABLED) != 0) {
		for (i = 0; i < count; i++) {
			regs->db_regs[i].dbr_addr = monitor->dbg_bvr[i];
			regs->db_regs[i].dbr_ctrl = monitor->dbg_bcr[i];
		}
	}

	return (0);
}
353 
354 int
355 set_dbregs(struct thread *td, struct dbreg *regs)
356 {
357 	struct debug_monitor_state *monitor;
358 	int count;
359 	int i;
360 
361 	monitor = &td->td_pcb->pcb_dbg_regs;
362 	count = 0;
363 	monitor->dbg_enable_count = 0;
364 	for (i = 0; i < DBG_BRP_MAX; i++) {
365 		/* TODO: Check these values */
366 		monitor->dbg_bvr[i] = regs->db_regs[i].dbr_addr;
367 		monitor->dbg_bcr[i] = regs->db_regs[i].dbr_ctrl;
368 		if ((monitor->dbg_bcr[i] & 1) != 0)
369 			monitor->dbg_enable_count++;
370 	}
371 	if (monitor->dbg_enable_count > 0)
372 		monitor->dbg_flags |= DBGMON_ENABLED;
373 
374 	return (0);
375 }
376 
377 #ifdef COMPAT_FREEBSD32
378 int
379 fill_regs32(struct thread *td, struct reg32 *regs)
380 {
381 	int i;
382 	struct trapframe *tf;
383 
384 	tf = td->td_frame;
385 	for (i = 0; i < 13; i++)
386 		regs->r[i] = tf->tf_x[i];
387 	/* For arm32, SP is r13 and LR is r14 */
388 	regs->r_sp = tf->tf_x[13];
389 	regs->r_lr = tf->tf_x[14];
390 	regs->r_pc = tf->tf_elr;
391 	regs->r_cpsr = tf->tf_spsr;
392 
393 	return (0);
394 }
395 
396 int
397 set_regs32(struct thread *td, struct reg32 *regs)
398 {
399 	int i;
400 	struct trapframe *tf;
401 
402 	tf = td->td_frame;
403 	for (i = 0; i < 13; i++)
404 		tf->tf_x[i] = regs->r[i];
405 	/* For arm 32, SP is r13 an LR is r14 */
406 	tf->tf_x[13] = regs->r_sp;
407 	tf->tf_x[14] = regs->r_lr;
408 	tf->tf_elr = regs->r_pc;
409 	tf->tf_spsr = regs->r_cpsr;
410 
411 
412 	return (0);
413 }
414 
/* Unimplemented: 32-bit compat VFP register read. */
int
fill_fpregs32(struct thread *td, struct fpreg32 *regs)
{

	printf("ARM64TODO: fill_fpregs32");
	return (EDOOFUS);
}
422 
/* Unimplemented: 32-bit compat VFP register write. */
int
set_fpregs32(struct thread *td, struct fpreg32 *regs)
{

	printf("ARM64TODO: set_fpregs32");
	return (EDOOFUS);
}
430 
/* Unimplemented: 32-bit compat debug register read. */
int
fill_dbregs32(struct thread *td, struct dbreg32 *regs)
{

	printf("ARM64TODO: fill_dbregs32");
	return (EDOOFUS);
}
438 
/* Unimplemented: 32-bit compat debug register write. */
int
set_dbregs32(struct thread *td, struct dbreg32 *regs)
{

	printf("ARM64TODO: set_dbregs32");
	return (EDOOFUS);
}
446 #endif
447 
448 int
449 ptrace_set_pc(struct thread *td, u_long addr)
450 {
451 
452 	td->td_frame->tf_elr = addr;
453 	return (0);
454 }
455 
456 int
457 ptrace_single_step(struct thread *td)
458 {
459 
460 	td->td_frame->tf_spsr |= PSR_SS;
461 	td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
462 	return (0);
463 }
464 
465 int
466 ptrace_clear_single_step(struct thread *td)
467 {
468 
469 	td->td_frame->tf_spsr &= ~PSR_SS;
470 	td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
471 	return (0);
472 }
473 
474 void
475 exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
476 {
477 	struct trapframe *tf = td->td_frame;
478 
479 	memset(tf, 0, sizeof(struct trapframe));
480 
481 	tf->tf_x[0] = stack;
482 	tf->tf_sp = STACKALIGN(stack);
483 	tf->tf_lr = imgp->entry_addr;
484 	tf->tf_elr = imgp->entry_addr;
485 }
486 
487 /* Sanity check these are the same size, they will be memcpy'd to and fro */
488 CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
489     sizeof((struct gpregs *)0)->gp_x);
490 CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
491     sizeof((struct reg *)0)->x);
492 
/*
 * Capture the thread's machine context into *mcp.  When the caller
 * passes GET_MC_CLEAR_RET, x0 and the carry flag are cleared so the
 * resumed context observes a successful (zero) syscall return.
 * Always returns 0.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;

	if (clear_ret & GET_MC_CLEAR_RET) {
		mcp->mc_gpregs.gp_x[0] = 0;
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
	} else {
		mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
	}

	/* x0 was handled above; copy x1..x29 in one go. */
	memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
	    sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

	mcp->mc_gpregs.gp_sp = tf->tf_sp;
	mcp->mc_gpregs.gp_lr = tf->tf_lr;
	mcp->mc_gpregs.gp_elr = tf->tf_elr;
	get_fpcontext(td, mcp);

	return (0);
}
516 
/*
 * Install a user-supplied machine context (from sigreturn/setcontext).
 * Returns EINVAL for contexts that are not plain EL0 AArch64 or that
 * try to alter the DAIF interrupt-mask bits - preventing userland from
 * elevating its execution state.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;
	uint32_t spsr;

	spsr = mcp->mc_gpregs.gp_spsr;
	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
	    (spsr & PSR_AARCH32) != 0 ||
	    (spsr & PSR_DAIF) != (td->td_frame->tf_spsr & PSR_DAIF))
		return (EINVAL);

	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

	tf->tf_sp = mcp->mc_gpregs.gp_sp;
	tf->tf_lr = mcp->mc_gpregs.gp_lr;
	tf->tf_elr = mcp->mc_gpregs.gp_elr;
	tf->tf_spsr = mcp->mc_gpregs.gp_spsr;
	set_fpcontext(td, mcp);

	return (0);
}
539 
540 static void
541 get_fpcontext(struct thread *td, mcontext_t *mcp)
542 {
543 #ifdef VFP
544 	struct pcb *curpcb;
545 
546 	critical_enter();
547 
548 	curpcb = curthread->td_pcb;
549 
550 	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
551 		/*
552 		 * If we have just been running VFP instructions we will
553 		 * need to save the state to memcpy it below.
554 		 */
555 		vfp_save_state(td, curpcb);
556 
557 		KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
558 		    ("Called get_fpcontext while the kernel is using the VFP"));
559 		KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
560 		    ("Non-userspace FPU flags set in get_fpcontext"));
561 		memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs,
562 		    sizeof(mcp->mc_fpregs));
563 		mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr;
564 		mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr;
565 		mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
566 		mcp->mc_flags |= _MC_FP_VALID;
567 	}
568 
569 	critical_exit();
570 #endif
571 }
572 
573 static void
574 set_fpcontext(struct thread *td, mcontext_t *mcp)
575 {
576 #ifdef VFP
577 	struct pcb *curpcb;
578 
579 	critical_enter();
580 
581 	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
582 		curpcb = curthread->td_pcb;
583 
584 		/*
585 		 * Discard any vfp state for the current thread, we
586 		 * are about to override it.
587 		 */
588 		vfp_discard(td);
589 
590 		KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
591 		    ("Called set_fpcontext while the kernel is using the VFP"));
592 		memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q,
593 		    sizeof(mcp->mc_fpregs));
594 		curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
595 		curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
596 		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
597 	}
598 
599 	critical_exit();
600 #endif
601 }
602 
/*
 * Idle the CPU until the next interrupt using WFI.  The idle clock is
 * stopped while waiting unless the scheduler reported the CPU busy.
 */
void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		/* DSB orders prior memory accesses before sleeping. */
		__asm __volatile(
		    "dsb sy \n"
		    "wfi    \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}
618 
/* Park the CPU permanently in a low-power wait loop. */
void
cpu_halt(void)
{

	/* We should have shutdown by now, if not enter a low power sleep */
	intr_disable();
	for (;;)
		__asm __volatile("wfi");
}
629 
630 /*
631  * Flush the D-cache for non-DMA I/O so that the I-cache can
632  * be made coherent later.
633  */
/* Not yet implemented on arm64; currently a no-op. */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* ARM64TODO TBD */
}
640 
641 /* Get current clock frequency for the given CPU ID. */
642 int
643 cpu_est_clockrate(int cpu_id, uint64_t *rate)
644 {
645 	struct pcpu *pc;
646 
647 	pc = pcpu_find(cpu_id);
648 	if (pc == NULL || rate == NULL)
649 		return (EINVAL);
650 
651 	if (pc->pc_clock == 0)
652 		return (EOPNOTSUPP);
653 
654 	*rate = pc->pc_clock;
655 	return (0);
656 }
657 
/*
 * Machine-dependent per-CPU data initialization.  The ACPI processor
 * id starts as an invalid sentinel; presumably filled in later by ACPI
 * CPU enumeration - confirm against the ACPI MADT parsing code.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
}
664 
/*
 * Enter a spinlock section: disable interrupts and a critical section,
 * counting nesting.  The DAIF state at the outermost call is stashed
 * so spinlock_exit() can restore it.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		/* Interrupts go off before the count is published. */
		daif = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_daif = daif;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}
680 
/*
 * Leave a spinlock section; on the outermost exit restore the DAIF
 * interrupt state saved by spinlock_enter().
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	daif = td->td_md.md_saved_daif;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		/* Leave the critical section before re-enabling interrupts. */
		critical_exit();
		intr_restore(daif);
	}
}
695 
696 #ifndef	_SYS_SYSPROTO_H_
697 struct sigreturn_args {
698 	ucontext_t *ucp;
699 };
700 #endif
701 
/*
 * sigreturn(2): restore the machine context and signal mask saved by
 * sendsig().  Returns EJUSTRETURN on success since the trapframe has
 * been rewritten directly and must not be clobbered by the normal
 * syscall return path.
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	int error;

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	/* set_mcontext() validates the PSTATE before installing it. */
	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}
720 
721 /*
722  * Construct a PCB from a trapframe. This is called from kdb_trap() where
723  * we want to start a backtrace from the function that caused us to enter
724  * the debugger. We have the context in the trapframe, but base the trace
725  * on the PCB. The PCB doesn't have to be perfect, as long as it contains
726  * enough for a backtrace.
727  */
728 void
729 makectx(struct trapframe *tf, struct pcb *pcb)
730 {
731 	int i;
732 
733 	for (i = 0; i < PCB_LR; i++)
734 		pcb->pcb_x[i] = tf->tf_x[i];
735 
736 	pcb->pcb_x[PCB_LR] = tf->tf_lr;
737 	pcb->pcb_pc = tf->tf_elr;
738 	pcb->pcb_sp = tf->tf_sp;
739 }
740 
/*
 * Deliver a signal to the current thread: build a sigframe (saved
 * context + siginfo) on the user stack (or alternate signal stack),
 * copy it out, and redirect the trapframe into the signal handler
 * with the signal trampoline as the return address.
 *
 * Called with the proc lock and ps_mtx held; both are dropped around
 * the copyout and reacquired before returning.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	struct sysentvec *sysent;
	int onstack, sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		/* Use the top of the alternate signal stack. */
		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else {
		fp = (struct sigframe *)td->td_frame->tf_sp;
	}

	/* Make room, keeping the stack aligned */
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Fill in the frame to copy out */
	bzero(&frame, sizeof(frame));
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ?
	    (onstack ? SS_ONSTACK : 0) : SS_DISABLE;
	/* Drop the locks: copyout may fault and sleep. */
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		/* sigexit() does not return. */
		sigexit(td, SIGILL);
	}

	/* Handler arguments: signo, siginfo pointer, ucontext pointer. */
	tf->tf_x[0]= sig;
	tf->tf_x[1] = (register_t)&fp->sf_si;
	tf->tf_x[2] = (register_t)&fp->sf_uc;

	tf->tf_elr = (register_t)catcher;
	tf->tf_sp = (register_t)fp;
	sysent = p->p_sysent;
	/* Return through the signal trampoline (shared page or stack copy). */
	if (sysent->sv_sigcode_base != 0)
		tf->tf_lr = (register_t)sysent->sv_sigcode_base;
	else
		tf->tf_lr = (register_t)(sysent->sv_psstrings -
		    *(sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
820 
/*
 * Wire up proc0/thread0: place the PCB at the top of the boot kernel
 * stack and publish it as the boot CPU's current PCB.
 */
static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_kstack_pages = KSTACK_PAGES;
	/* The PCB occupies the very top of the kernel stack. */
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
	/* No CPU holds our VFP state yet. */
	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}
837 
/*
 * Layout of a UEFI memory descriptor as returned by GetMemoryMap().
 * NOTE(review): this local definition appears unused in the visible
 * code, which walks descriptors via struct efi_md - confirm.
 */
typedef struct {
	uint32_t type;
	uint64_t phys_start;
	uint64_t virt_start;
	uint64_t num_pages;
	uint64_t attr;
} EFI_MEMORY_DESCRIPTOR;

/* Callback invoked once per descriptor by foreach_efi_map_entry(). */
typedef void (*efi_map_entry_cb)(struct efi_md *);
847 
848 static void
849 foreach_efi_map_entry(struct efi_map_header *efihdr, efi_map_entry_cb cb)
850 {
851 	struct efi_md *map, *p;
852 	size_t efisz;
853 	int ndesc, i;
854 
855 	/*
856 	 * Memory map data provided by UEFI via the GetMemoryMap
857 	 * Boot Services API.
858 	 */
859 	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
860 	map = (struct efi_md *)((uint8_t *)efihdr + efisz);
861 
862 	if (efihdr->descriptor_size == 0)
863 		return;
864 	ndesc = efihdr->memory_size / efihdr->descriptor_size;
865 
866 	for (i = 0, p = map; i < ndesc; i++,
867 	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
868 		cb(p);
869 	}
870 }
871 
872 static void
873 exclude_efi_map_entry(struct efi_md *p)
874 {
875 
876 	switch (p->md_type) {
877 	case EFI_MD_TYPE_CODE:
878 	case EFI_MD_TYPE_DATA:
879 	case EFI_MD_TYPE_BS_CODE:
880 	case EFI_MD_TYPE_BS_DATA:
881 	case EFI_MD_TYPE_FREE:
882 		/*
883 		 * We're allowed to use any entry with these types.
884 		 */
885 		break;
886 	default:
887 		physmem_exclude_region(p->md_phys, p->md_pages * PAGE_SIZE,
888 		    EXFLAG_NOALLOC);
889 	}
890 }
891 
/* Exclude every non-usable EFI range from the physmem allocator. */
static void
exclude_efi_map_entries(struct efi_map_header *efihdr)
{

	foreach_efi_map_entry(efihdr, exclude_efi_map_entry);
}
898 
899 static void
900 add_efi_map_entry(struct efi_md *p)
901 {
902 
903 	switch (p->md_type) {
904 	case EFI_MD_TYPE_RT_DATA:
905 		/*
906 		 * Runtime data will be excluded after the DMAP
907 		 * region is created to stop it from being added
908 		 * to phys_avail.
909 		 */
910 	case EFI_MD_TYPE_CODE:
911 	case EFI_MD_TYPE_DATA:
912 	case EFI_MD_TYPE_BS_CODE:
913 	case EFI_MD_TYPE_BS_DATA:
914 	case EFI_MD_TYPE_FREE:
915 		/*
916 		 * We're allowed to use any entry with these types.
917 		 */
918 		physmem_hardware_region(p->md_phys,
919 		    p->md_pages * PAGE_SIZE);
920 		break;
921 	}
922 }
923 
/* Register every usable EFI range as hardware RAM. */
static void
add_efi_map_entries(struct efi_map_header *efihdr)
{

	foreach_efi_map_entry(efihdr, add_efi_map_entry);
}
930 
931 static void
932 print_efi_map_entry(struct efi_md *p)
933 {
934 	const char *type;
935 	static const char *types[] = {
936 		"Reserved",
937 		"LoaderCode",
938 		"LoaderData",
939 		"BootServicesCode",
940 		"BootServicesData",
941 		"RuntimeServicesCode",
942 		"RuntimeServicesData",
943 		"ConventionalMemory",
944 		"UnusableMemory",
945 		"ACPIReclaimMemory",
946 		"ACPIMemoryNVS",
947 		"MemoryMappedIO",
948 		"MemoryMappedIOPortSpace",
949 		"PalCode",
950 		"PersistentMemory"
951 	};
952 
953 	if (p->md_type < nitems(types))
954 		type = types[p->md_type];
955 	else
956 		type = "<INVALID>";
957 	printf("%23s %012lx %12p %08lx ", type, p->md_phys,
958 	    p->md_virt, p->md_pages);
959 	if (p->md_attr & EFI_MD_ATTR_UC)
960 		printf("UC ");
961 	if (p->md_attr & EFI_MD_ATTR_WC)
962 		printf("WC ");
963 	if (p->md_attr & EFI_MD_ATTR_WT)
964 		printf("WT ");
965 	if (p->md_attr & EFI_MD_ATTR_WB)
966 		printf("WB ");
967 	if (p->md_attr & EFI_MD_ATTR_UCE)
968 		printf("UCE ");
969 	if (p->md_attr & EFI_MD_ATTR_WP)
970 		printf("WP ");
971 	if (p->md_attr & EFI_MD_ATTR_RP)
972 		printf("RP ");
973 	if (p->md_attr & EFI_MD_ATTR_XP)
974 		printf("XP ");
975 	if (p->md_attr & EFI_MD_ATTR_NV)
976 		printf("NV ");
977 	if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
978 		printf("MORE_RELIABLE ");
979 	if (p->md_attr & EFI_MD_ATTR_RO)
980 		printf("RO ");
981 	if (p->md_attr & EFI_MD_ATTR_RT)
982 		printf("RUNTIME");
983 	printf("\n");
984 }
985 
/* Dump the whole EFI memory map in tabular form (bootverbose aid). */
static void
print_efi_map_entries(struct efi_map_header *efihdr)
{

	printf("%23s %12s %12s %8s %4s\n",
	    "Type", "Physical", "Virtual", "#Pages", "Attr");
	foreach_efi_map_entry(efihdr, print_efi_map_entry);
}
994 
995 #ifdef FDT
/*
 * Locate the flattened device tree blob (from loader metadata, or the
 * statically linked copy when FDT_DTB_STATIC) and initialize the OFW
 * FDT backend with it.  Panics if an FDT is found but cannot be
 * installed; merely warns if none is found.
 */
static void
try_load_dtb(caddr_t kmdp)
{
	vm_offset_t dtbp;

	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == 0)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (dtbp == (vm_offset_t)NULL) {
		printf("ERROR loading DTB\n");
		return;
	}

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

	/* Pick up kernel boot arguments from the FDT chosen node. */
	parse_fdt_bootargs();
}
1024 #endif
1025 
1026 static bool
1027 bus_probe(void)
1028 {
1029 	bool has_acpi, has_fdt;
1030 	char *order, *env;
1031 
1032 	has_acpi = has_fdt = false;
1033 
1034 #ifdef FDT
1035 	has_fdt = (OF_peer(0) != 0);
1036 #endif
1037 #ifdef DEV_ACPI
1038 	has_acpi = (acpi_find_table(ACPI_SIG_SPCR) != 0);
1039 #endif
1040 
1041 	env = kern_getenv("kern.cfg.order");
1042 	if (env != NULL) {
1043 		order = env;
1044 		while (order != NULL) {
1045 			if (has_acpi &&
1046 			    strncmp(order, "acpi", 4) == 0 &&
1047 			    (order[4] == ',' || order[4] == '\0')) {
1048 				arm64_bus_method = ARM64_BUS_ACPI;
1049 				break;
1050 			}
1051 			if (has_fdt &&
1052 			    strncmp(order, "fdt", 3) == 0 &&
1053 			    (order[3] == ',' || order[3] == '\0')) {
1054 				arm64_bus_method = ARM64_BUS_FDT;
1055 				break;
1056 			}
1057 			order = strchr(order, ',');
1058 		}
1059 		freeenv(env);
1060 
1061 		/* If we set the bus method it is valid */
1062 		if (arm64_bus_method != ARM64_BUS_NONE)
1063 			return (true);
1064 	}
1065 	/* If no order or an invalid order was set use the default */
1066 	if (arm64_bus_method == ARM64_BUS_NONE) {
1067 		if (has_fdt)
1068 			arm64_bus_method = ARM64_BUS_FDT;
1069 		else if (has_acpi)
1070 			arm64_bus_method = ARM64_BUS_ACPI;
1071 	}
1072 
1073 	/*
1074 	 * If no option was set the default is valid, otherwise we are
1075 	 * setting one to get cninit() working, then calling panic to tell
1076 	 * the user about the invalid bus setup.
1077 	 */
1078 	return (env == NULL);
1079 }
1080 
/*
 * Probe cache geometry and the "dc zva" zeroing block.  When DC ZVA is
 * permitted (DCZID_EL0.DZP clear) the cache-assisted pagezero
 * implementation is selected.
 */
static void
cache_setup(void)
{
	int dczva_line_shift;
	uint32_t dczid_el0;

	identify_cache(READ_SPECIALREG(ctr_el0));

	dczid_el0 = READ_SPECIALREG(dczid_el0);

	/* Check if dc zva is not prohibited */
	if (dczid_el0 & DCZID_DZP)
		dczva_line_size = 0;
	else {
		/* Same as with above calculations */
		/* BS holds log2 of the zeroed block size in 4-byte words. */
		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
		dczva_line_size = sizeof(int) << dczva_line_shift;

		/* Change pagezero function */
		pagezero = pagezero_cache;
	}
}
1103 
/*
 * Pick the VM_MEMATTR_* attribute for mapping physical address pa,
 * based on the UEFI memory map when the loader passed one in.
 * Addresses not covered by the map (or with no usable attributes)
 * are mapped as device memory.
 */
int
memory_mapping_mode(vm_paddr_t pa)
{
	struct efi_md *map, *p;
	size_t efisz;
	int ndesc, i;

	/* With no EFI map, assume normal write-back cacheable memory. */
	if (efihdr == NULL)
		return (VM_MEMATTR_WRITE_BACK);

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	 */
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	if (efihdr->descriptor_size == 0)
		return (VM_MEMATTR_WRITE_BACK);
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		if (pa < p->md_phys ||
		    pa >= p->md_phys + p->md_pages * EFI_PAGE_SIZE)
			continue;
		/* I/O ranges are always device memory. */
		if (p->md_type == EFI_MD_TYPE_IOMEM ||
		    p->md_type == EFI_MD_TYPE_IOPORT)
			return (VM_MEMATTR_DEVICE);
		/* Otherwise pick the strongest cacheability UEFI permits. */
		else if ((p->md_attr & EFI_MD_ATTR_WB) != 0 ||
		    p->md_type == EFI_MD_TYPE_RECLAIM)
			return (VM_MEMATTR_WRITE_BACK);
		else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
			return (VM_MEMATTR_WRITE_THROUGH);
		else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
			return (VM_MEMATTR_WRITE_COMBINING);
		break;
	}

	return (VM_MEMATTR_DEVICE);
}
1145 
1146 void
1147 initarm(struct arm64_bootparams *abp)
1148 {
1149 	struct efi_fb *efifb;
1150 	struct pcpu *pcpup;
1151 	char *env;
1152 #ifdef FDT
1153 	struct mem_region mem_regions[FDT_MEM_REGIONS];
1154 	int mem_regions_sz;
1155 #endif
1156 	vm_offset_t lastaddr;
1157 	caddr_t kmdp;
1158 	bool valid;
1159 
1160 	boot_el = abp->boot_el;
1161 
	/* Parse loader or FDT boot parameters. Determine last used address. */
1163 	lastaddr = parse_boot_param(abp);
1164 
1165 	/* Find the kernel address */
1166 	kmdp = preload_search_by_type("elf kernel");
1167 	if (kmdp == NULL)
1168 		kmdp = preload_search_by_type("elf64 kernel");
1169 
1170 	identify_cpu(0);
1171 	update_special_regs(0);
1172 
1173 	link_elf_ireloc(kmdp);
1174 	try_load_dtb(kmdp);
1175 
1176 	efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);
1177 
1178 	/* Load the physical memory ranges */
1179 	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
1180 	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
1181 	if (efihdr != NULL)
1182 		add_efi_map_entries(efihdr);
1183 #ifdef FDT
1184 	else {
1185 		/* Grab physical memory regions information from device tree. */
1186 		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
1187 		    NULL) != 0)
1188 			panic("Cannot get physical memory regions");
1189 		physmem_hardware_regions(mem_regions, mem_regions_sz);
1190 	}
1191 	if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0)
1192 		physmem_exclude_regions(mem_regions, mem_regions_sz,
1193 		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
1194 #endif
1195 
1196 	/* Exclude the EFI framebuffer from our view of physical memory. */
1197 	efifb = (struct efi_fb *)preload_search_info(kmdp,
1198 	    MODINFO_METADATA | MODINFOMD_EFI_FB);
1199 	if (efifb != NULL)
1200 		physmem_exclude_region(efifb->fb_addr, efifb->fb_size,
1201 		    EXFLAG_NOALLOC);
1202 
1203 	/* Set the pcpu data, this is needed by pmap_bootstrap */
1204 	pcpup = &__pcpu[0];
1205 	pcpu_init(pcpup, 0, sizeof(struct pcpu));
1206 
1207 	/*
1208 	 * Set the pcpu pointer with a backup in tpidr_el1 to be
1209 	 * loaded when entering the kernel from userland.
1210 	 */
1211 	__asm __volatile(
1212 	    "mov x18, %0 \n"
1213 	    "msr tpidr_el1, %0" :: "r"(pcpup));
1214 
1215 	PCPU_SET(curthread, &thread0);
1216 	PCPU_SET(midr, get_midr());
1217 
1218 	/* Do basic tuning, hz etc */
1219 	init_param1();
1220 
1221 	cache_setup();
1222 	pan_setup();
1223 
1224 	/* Bootstrap enough of pmap  to enter the kernel proper */
1225 	pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
1226 	    KERNBASE - abp->kern_delta, lastaddr - KERNBASE);
1227 	/* Exclude entries neexed in teh DMAP region, but not phys_avail */
1228 	if (efihdr != NULL)
1229 		exclude_efi_map_entries(efihdr);
1230 	physmem_init_kernel_globals();
1231 
1232 	devmap_bootstrap(0, NULL);
1233 
1234 	valid = bus_probe();
1235 
1236 	cninit();
1237 
1238 	if (!valid)
1239 		panic("Invalid bus configuration: %s",
1240 		    kern_getenv("kern.cfg.order"));
1241 
1242 	init_proc0(abp->kern_stack);
1243 	msgbufinit(msgbufp, msgbufsize);
1244 	mutex_init();
1245 	init_param2(physmem);
1246 
1247 	dbg_init();
1248 	kdb_init();
1249 	pan_enable();
1250 
1251 	kcsan_cpu_init(0);
1252 
1253 	env = kern_getenv("kernelname");
1254 	if (env != NULL)
1255 		strlcpy(kernelname, env, sizeof(kernelname));
1256 
1257 	if (boothowto & RB_VERBOSE) {
1258 		print_efi_map_entries(efihdr);
1259 		physmem_print_tables();
1260 	}
1261 
1262 	early_boot = 0;
1263 }
1264 
/*
 * Initialize the self-hosted debug facility: unlock the debug registers
 * and set up the monitor so kernel debuggers can program watchpoints.
 */
void
dbg_init(void)
{

	/* Clear OS lock */
	WRITE_SPECIALREG(oslar_el1, 0);

	/* This permits DDB to use debug registers for watchpoints. */
	dbg_monitor_init();

	/* TODO: Eventually will need to initialize debug registers here. */
}
1277 
1278 #ifdef DDB
1279 #include <ddb/ddb.h>
1280 
/*
 * DDB "show specialregs" command: dump the current values of the AArch64
 * system (special) registers readable at the current exception level.
 */
DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
/* Print one register as "name = value"; the name must be a compile-time
 * token because READ_SPECIALREG() expands to an mrs instruction. */
#define	PRINT_REG(reg)	\
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

	PRINT_REG(actlr_el1);
	PRINT_REG(afsr0_el1);
	PRINT_REG(afsr1_el1);
	PRINT_REG(aidr_el1);
	PRINT_REG(amair_el1);
	PRINT_REG(ccsidr_el1);
	PRINT_REG(clidr_el1);
	PRINT_REG(contextidr_el1);
	PRINT_REG(cpacr_el1);
	PRINT_REG(csselr_el1);
	PRINT_REG(ctr_el0);
	PRINT_REG(currentel);
	PRINT_REG(daif);
	PRINT_REG(dczid_el0);
	PRINT_REG(elr_el1);
	PRINT_REG(esr_el1);
	PRINT_REG(far_el1);
#if 0
	/* ARM64TODO: Enable VFP before reading floating-point registers */
	PRINT_REG(fpcr);
	PRINT_REG(fpsr);
#endif
	PRINT_REG(id_aa64afr0_el1);
	PRINT_REG(id_aa64afr1_el1);
	PRINT_REG(id_aa64dfr0_el1);
	PRINT_REG(id_aa64dfr1_el1);
	PRINT_REG(id_aa64isar0_el1);
	PRINT_REG(id_aa64isar1_el1);
	PRINT_REG(id_aa64pfr0_el1);
	PRINT_REG(id_aa64pfr1_el1);
	PRINT_REG(id_afr0_el1);
	PRINT_REG(id_dfr0_el1);
	PRINT_REG(id_isar0_el1);
	PRINT_REG(id_isar1_el1);
	PRINT_REG(id_isar2_el1);
	PRINT_REG(id_isar3_el1);
	PRINT_REG(id_isar4_el1);
	PRINT_REG(id_isar5_el1);
	PRINT_REG(id_mmfr0_el1);
	PRINT_REG(id_mmfr1_el1);
	PRINT_REG(id_mmfr2_el1);
	PRINT_REG(id_mmfr3_el1);
#if 0
	/* Missing from llvm */
	PRINT_REG(id_mmfr4_el1);
#endif
	PRINT_REG(id_pfr0_el1);
	PRINT_REG(id_pfr1_el1);
	PRINT_REG(isr_el1);
	PRINT_REG(mair_el1);
	PRINT_REG(midr_el1);
	PRINT_REG(mpidr_el1);
	PRINT_REG(mvfr0_el1);
	PRINT_REG(mvfr1_el1);
	PRINT_REG(mvfr2_el1);
	PRINT_REG(revidr_el1);
	PRINT_REG(sctlr_el1);
	PRINT_REG(sp_el0);
	PRINT_REG(spsel);
	PRINT_REG(spsr_el1);
	PRINT_REG(tcr_el1);
	PRINT_REG(tpidr_el0);
	PRINT_REG(tpidr_el1);
	PRINT_REG(tpidrro_el0);
	PRINT_REG(ttbr0_el1);
	PRINT_REG(ttbr1_el1);
	PRINT_REG(vbar_el1);
#undef PRINT_REG
}
1355 
1356 DB_SHOW_COMMAND(vtop, db_show_vtop)
1357 {
1358 	uint64_t phys;
1359 
1360 	if (have_addr) {
1361 		phys = arm64_address_translate_s1e1r(addr);
1362 		db_printf("EL1 physical address reg (read):  0x%016lx\n", phys);
1363 		phys = arm64_address_translate_s1e1w(addr);
1364 		db_printf("EL1 physical address reg (write): 0x%016lx\n", phys);
1365 		phys = arm64_address_translate_s1e0r(addr);
1366 		db_printf("EL0 physical address reg (read):  0x%016lx\n", phys);
1367 		phys = arm64_address_translate_s1e0w(addr);
1368 		db_printf("EL0 physical address reg (write): 0x%016lx\n", phys);
1369 	} else
1370 		db_printf("show vtop <virt_addr>\n");
1371 }
1372 #endif
1373