xref: /freebsd/sys/arm64/arm64/machdep.c (revision c1d255d3)
1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include "opt_acpi.h"
29 #include "opt_platform.h"
30 #include "opt_ddb.h"
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/buf.h>
38 #include <sys/bus.h>
39 #include <sys/cons.h>
40 #include <sys/cpu.h>
41 #include <sys/csan.h>
42 #include <sys/devmap.h>
43 #include <sys/efi.h>
44 #include <sys/exec.h>
45 #include <sys/imgact.h>
46 #include <sys/kdb.h>
47 #include <sys/kernel.h>
48 #include <sys/ktr.h>
49 #include <sys/limits.h>
50 #include <sys/linker.h>
51 #include <sys/msgbuf.h>
52 #include <sys/pcpu.h>
53 #include <sys/physmem.h>
54 #include <sys/proc.h>
55 #include <sys/ptrace.h>
56 #include <sys/reboot.h>
57 #include <sys/reg.h>
58 #include <sys/rwlock.h>
59 #include <sys/sched.h>
60 #include <sys/signalvar.h>
61 #include <sys/syscallsubr.h>
62 #include <sys/sysent.h>
63 #include <sys/sysproto.h>
64 #include <sys/ucontext.h>
65 #include <sys/vdso.h>
66 #include <sys/vmmeter.h>
67 
68 #include <vm/vm.h>
69 #include <vm/vm_param.h>
70 #include <vm/vm_kern.h>
71 #include <vm/vm_object.h>
72 #include <vm/vm_page.h>
73 #include <vm/vm_phys.h>
74 #include <vm/pmap.h>
75 #include <vm/vm_map.h>
76 #include <vm/vm_pager.h>
77 
78 #include <machine/armreg.h>
79 #include <machine/cpu.h>
80 #include <machine/debug_monitor.h>
81 #include <machine/kdb.h>
82 #include <machine/machdep.h>
83 #include <machine/metadata.h>
84 #include <machine/md_var.h>
85 #include <machine/pcb.h>
86 #include <machine/undefined.h>
87 #include <machine/vmparam.h>
88 
89 #ifdef VFP
90 #include <machine/vfp.h>
91 #endif
92 
93 #ifdef DEV_ACPI
94 #include <contrib/dev/acpica/include/acpi.h>
95 #include <machine/acpica_machdep.h>
96 #endif
97 
98 #ifdef FDT
99 #include <dev/fdt/fdt_common.h>
100 #include <dev/ofw/openfirm.h>
101 #endif
102 
103 static void get_fpcontext(struct thread *td, mcontext_t *mcp);
104 static void set_fpcontext(struct thread *td, mcontext_t *mcp);
105 
106 enum arm64_bus arm64_bus_method = ARM64_BUS_NONE;
107 
108 struct pcpu __pcpu[MAXCPU];
109 
110 static struct trapframe proc0_tf;
111 
112 int early_boot = 1;
113 int cold = 1;
114 static int boot_el;
115 
116 struct kva_md_info kmi;
117 
118 int64_t dczva_line_size;	/* The size of cache line the dc zva zeroes */
119 int has_pan;
120 
121 /*
122  * Physical address of the EFI System Table. Stashed from the metadata hints
123  * passed into the kernel and used by the EFI code to call runtime services.
124  */
125 vm_paddr_t efi_systbl_phys;
126 static struct efi_map_header *efihdr;
127 
128 /* pagezero_* implementations are provided in support.S */
129 void pagezero_simple(void *);
130 void pagezero_cache(void *);
131 
132 /* pagezero_simple is default pagezero */
133 void (*pagezero)(void *p) = pagezero_simple;
134 
135 int (*apei_nmi)(void);
136 
137 static void
138 pan_setup(void)
139 {
140 	uint64_t id_aa64mfr1;
141 
142 	id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
143 	if (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
144 		has_pan = 1;
145 }
146 
/*
 * Turn on PAN for the current CPU if pan_setup() found hardware support.
 * Called once per CPU during bring-up.
 */
void
pan_enable(void)
{

	/*
	 * The LLVM integrated assembler doesn't understand the PAN
	 * PSTATE field. Because of this we need to manually create
	 * the instruction in an asm block. This is equivalent to:
	 * msr pan, #1
	 *
	 * This sets the PAN bit, stopping the kernel from accessing
	 * memory when userspace can also access it unless the kernel
	 * uses the userspace load/store instructions.
	 */
	if (has_pan) {
		/* Clear SCTLR_EL1.SPAN before setting PSTATE.PAN. */
		WRITE_SPECIALREG(sctlr_el1,
		    READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
		__asm __volatile(".inst 0xd500409f | (0x1 << 8)");
	}
}
167 
168 bool
169 has_hyp(void)
170 {
171 
172 	return (boot_el == 2);
173 }
174 
/*
 * Late CPU initialization, run at SI_SUB_CPU: report the physical memory
 * layout, initialize the undefined-instruction handlers, install CPU
 * errata workarounds, and set up the kernel VM submaps and buffer cache.
 */
static void
cpu_startup(void *dummy)
{
	vm_paddr_t size;
	int i;

	printf("real memory  = %ju (%ju MB)\n", ptoa((uintmax_t)realmem),
	    ptoa((uintmax_t)realmem) / 1024 / 1024);

	if (bootverbose) {
		/* Dump each usable physical memory range. */
		printf("Physical memory chunk(s):\n");
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			size = phys_avail[i + 1] - phys_avail[i];
			printf("%#016jx - %#016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[i],
			    (uintmax_t)phys_avail[i + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_free_count()),
	    ptoa((uintmax_t)vm_free_count()) / 1024 / 1024);

	undef_init();
	install_cpu_errata();

	/* Kernel submaps and the buffer cache depend on the VM system. */
	vm_ksubmap_init(&kmi);
	bufinit();
	vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
208 
/*
 * Re-run ifunc relocations late in boot (SI_SUB_CPU, last order), once
 * the CPU features that the resolvers depend on have been identified.
 */
static void
late_ifunc_resolve(void *dummy __unused)
{
	link_elf_late_ireloc();
}
SYSINIT(late_ifunc_resolve, SI_SUB_CPU, SI_ORDER_ANY, late_ifunc_resolve, NULL);
215 
/*
 * Scheduler hook to wake an idle CPU. The WFI-based idle loop in
 * cpu_idle() is woken by interrupts, so there is nothing to do here;
 * returning 0 tells the caller no machine-dependent wakeup happened.
 */
int
cpu_idle_wakeup(int cpu)
{
	return (0);
}
222 
/*
 * ptrace(2)/core-dump support: copy the thread's saved general purpose
 * registers from its trapframe into *regs. Always returns 0.
 */
int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	regs->sp = frame->tf_sp;
	regs->lr = frame->tf_lr;
	regs->elr = frame->tf_elr;
	regs->spsr = frame->tf_spsr;

	memcpy(regs->x, frame->tf_x, sizeof(regs->x));

#ifdef COMPAT_FREEBSD32
	/*
	 * We may be called here for a 32bits process, if we're using a
	 * 64bits debugger. If so, put PC and SPSR where it expects it.
	 */
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		regs->x[15] = frame->tf_elr;
		regs->x[16] = frame->tf_spsr;
	}
#endif
	return (0);
}
248 
/*
 * ptrace(2) support: install *regs into the thread's trapframe. Only the
 * condition-flag bits of SPSR may be changed by the debugger; the mode
 * and mask bits are preserved. Always returns 0.
 */
int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	frame->tf_sp = regs->sp;
	frame->tf_lr = regs->lr;
	frame->tf_elr = regs->elr;
	/* Let the debugger update only the NZCV-style flag bits. */
	frame->tf_spsr &= ~PSR_FLAGS;
	frame->tf_spsr |= regs->spsr & PSR_FLAGS;

	memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));

#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/*
		 * We may be called for a 32bits process if we're using
		 * a 64bits debugger. If so, get PC and SPSR from where
		 * it put it.
		 */
		/*
		 * NOTE(review): unlike the 64-bit path above, this
		 * overwrites tf_spsr entirely with only the flag bits,
		 * clearing the mode bits — confirm this is intended.
		 */
		frame->tf_elr = regs->x[15];
		frame->tf_spsr = regs->x[16] & PSR_FLAGS;
	}
#endif
	return (0);
}
276 
/*
 * ptrace(2)/core-dump support: copy the thread's saved VFP state into
 * *regs. If the VFP unit has not been used the result is all zeros.
 * Always returns 0.
 */
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		if (td == curthread)
			vfp_save_state(td, pcb);

		KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
		    ("Called fill_fpregs while the kernel is using the VFP"));
		memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs,
		    sizeof(regs->fp_q));
		regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
		regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
	} else
#endif
		memset(regs, 0, sizeof(*regs));
	return (0);
}
303 
/*
 * ptrace(2) support: install *regs as the thread's VFP state. A no-op
 * (other than returning 0) when the kernel is built without VFP.
 */
int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
	    ("Called set_fpregs while the kernel is using the VFP"));
	memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q));
	pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr;
	pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr;
#endif
	return (0);
}
319 
/*
 * ptrace(2) support: report the hardware debug capabilities (debug
 * architecture version, number of break/watchpoints) and, if the debug
 * monitor is enabled for this thread, the current break/watchpoint
 * register contents. Always returns 0.
 */
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{
	struct debug_monitor_state *monitor;
	int i;
	uint8_t debug_ver, nbkpts, nwtpts;

	memset(regs, 0, sizeof(*regs));

	/* Read the debug feature fields from ID_AA64DFR0_EL1. */
	extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_DebugVer_SHIFT,
	    &debug_ver);
	extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_BRPs_SHIFT,
	    &nbkpts);
	extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_WRPs_SHIFT,
	    &nwtpts);

	/*
	 * The BRPs field contains the number of breakpoints - 1. Armv8-A
	 * allows the hardware to provide 2-16 breakpoints so this won't
	 * overflow an 8 bit value. The same applies to the WRPs field.
	 */
	nbkpts++;
	nwtpts++;

	regs->db_debug_ver = debug_ver;
	regs->db_nbkpts = nbkpts;
	regs->db_nwtpts = nwtpts;

	/* Only expose register contents if the monitor is in use. */
	monitor = &td->td_pcb->pcb_dbg_regs;
	if ((monitor->dbg_flags & DBGMON_ENABLED) != 0) {
		for (i = 0; i < nbkpts; i++) {
			regs->db_breakregs[i].dbr_addr = monitor->dbg_bvr[i];
			regs->db_breakregs[i].dbr_ctrl = monitor->dbg_bcr[i];
		}
		for (i = 0; i < nwtpts; i++) {
			regs->db_watchregs[i].dbw_addr = monitor->dbg_wvr[i];
			regs->db_watchregs[i].dbw_ctrl = monitor->dbg_wcr[i];
		}
	}

	return (0);
}
362 
/*
 * ptrace(2) support: validate and install user-supplied hardware
 * break/watchpoint registers into the thread's debug monitor state.
 * Returns EINVAL if any address is outside user space or any enabled
 * control value fails validation; 0 on success. Note that on an EINVAL
 * return earlier slots may already have been written into the monitor
 * state.
 */
int
set_dbregs(struct thread *td, struct dbreg *regs)
{
	struct debug_monitor_state *monitor;
	uint64_t addr;
	uint32_t ctrl;
	int count;
	int i;

	monitor = &td->td_pcb->pcb_dbg_regs;
	count = 0;
	monitor->dbg_enable_count = 0;

	/* Breakpoint registers. */
	for (i = 0; i < DBG_BRP_MAX; i++) {
		addr = regs->db_breakregs[i].dbr_addr;
		ctrl = regs->db_breakregs[i].dbr_ctrl;

		/*
		 * Don't let the user set a breakpoint on a kernel or
		 * non-canonical user address.
		 */
		if (addr >= VM_MAXUSER_ADDRESS)
			return (EINVAL);

		/*
		 * The lowest 2 bits are ignored, so record the effective
		 * address.
		 */
		addr = rounddown2(addr, 4);

		/*
		 * Some control fields are ignored, and other bits reserved.
		 * Only unlinked, address-matching breakpoints are supported.
		 *
		 * XXX: fields that appear unvalidated, such as BAS, have
		 * constrained undefined behaviour. If the user mis-programs
		 * these, there is no risk to the system.
		 */
		ctrl &= DBG_BCR_EN | DBG_BCR_PMC | DBG_BCR_BAS;
		if ((ctrl & DBG_BCR_EN) != 0) {
			/* Only target EL0. */
			if ((ctrl & DBG_BCR_PMC) != DBG_BCR_PMC_EL0)
				return (EINVAL);

			monitor->dbg_enable_count++;
		}

		monitor->dbg_bvr[i] = addr;
		monitor->dbg_bcr[i] = ctrl;
	}

	/* Watchpoint registers. */
	for (i = 0; i < DBG_WRP_MAX; i++) {
		addr = regs->db_watchregs[i].dbw_addr;
		ctrl = regs->db_watchregs[i].dbw_ctrl;

		/*
		 * Don't let the user set a watchpoint on a kernel or
		 * non-canonical user address.
		 */
		if (addr >= VM_MAXUSER_ADDRESS)
			return (EINVAL);

		/*
		 * Some control fields are ignored, and other bits reserved.
		 * Only unlinked watchpoints are supported.
		 */
		ctrl &= DBG_WCR_EN | DBG_WCR_PAC | DBG_WCR_LSC | DBG_WCR_BAS |
		    DBG_WCR_MASK;

		if ((ctrl & DBG_WCR_EN) != 0) {
			/* Only target EL0. */
			if ((ctrl & DBG_WCR_PAC) != DBG_WCR_PAC_EL0)
				return (EINVAL);

			/* Must set at least one of the load/store bits. */
			if ((ctrl & DBG_WCR_LSC) == 0)
				return (EINVAL);

			/*
			 * When specifying the address range with BAS, the MASK
			 * field must be zero.
			 */
			if ((ctrl & DBG_WCR_BAS) != DBG_WCR_BAS_MASK &&
			    (ctrl & DBG_WCR_MASK) != 0)
				return (EINVAL);

			monitor->dbg_enable_count++;
		}
		monitor->dbg_wvr[i] = addr;
		monitor->dbg_wcr[i] = ctrl;
	}

	/* Flag the monitor as enabled if any slot was enabled above. */
	if (monitor->dbg_enable_count > 0)
		monitor->dbg_flags |= DBGMON_ENABLED;

	return (0);
}
460 
461 #ifdef COMPAT_FREEBSD32
462 int
463 fill_regs32(struct thread *td, struct reg32 *regs)
464 {
465 	int i;
466 	struct trapframe *tf;
467 
468 	tf = td->td_frame;
469 	for (i = 0; i < 13; i++)
470 		regs->r[i] = tf->tf_x[i];
471 	/* For arm32, SP is r13 and LR is r14 */
472 	regs->r_sp = tf->tf_x[13];
473 	regs->r_lr = tf->tf_x[14];
474 	regs->r_pc = tf->tf_elr;
475 	regs->r_cpsr = tf->tf_spsr;
476 
477 	return (0);
478 }
479 
480 int
481 set_regs32(struct thread *td, struct reg32 *regs)
482 {
483 	int i;
484 	struct trapframe *tf;
485 
486 	tf = td->td_frame;
487 	for (i = 0; i < 13; i++)
488 		tf->tf_x[i] = regs->r[i];
489 	/* For arm 32, SP is r13 an LR is r14 */
490 	tf->tf_x[13] = regs->r_sp;
491 	tf->tf_x[14] = regs->r_lr;
492 	tf->tf_elr = regs->r_pc;
493 	tf->tf_spsr = regs->r_cpsr;
494 
495 	return (0);
496 }
497 
/* XXX fill/set dbregs/fpregs are stubbed on 32-bit arm. */
int
fill_fpregs32(struct thread *td, struct fpreg32 *regs)
{

	/* Stub: report an all-zero FP register set. */
	memset(regs, 0, sizeof(*regs));
	return (0);
}
506 
/* Stub: silently discard the supplied 32-bit FP register set. */
int
set_fpregs32(struct thread *td, struct fpreg32 *regs)
{

	return (0);
}
513 
/* Stub: report an all-zero 32-bit debug register set. */
int
fill_dbregs32(struct thread *td, struct dbreg32 *regs)
{

	memset(regs, 0, sizeof(*regs));
	return (0);
}
521 
/* Stub: silently discard the supplied 32-bit debug register set. */
int
set_dbregs32(struct thread *td, struct dbreg32 *regs)
{

	return (0);
}
528 #endif
529 
530 int
531 ptrace_set_pc(struct thread *td, u_long addr)
532 {
533 
534 	td->td_frame->tf_elr = addr;
535 	return (0);
536 }
537 
538 int
539 ptrace_single_step(struct thread *td)
540 {
541 
542 	td->td_frame->tf_spsr |= PSR_SS;
543 	td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
544 	return (0);
545 }
546 
547 int
548 ptrace_clear_single_step(struct thread *td)
549 {
550 
551 	td->td_frame->tf_spsr &= ~PSR_SS;
552 	td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
553 	return (0);
554 }
555 
/*
 * Set up the register state for a thread that is about to return to
 * userspace at the start of a newly exec'd image: zero the trapframe,
 * point PC/LR at the image entry point, set the initial stack pointer,
 * and clear per-thread TLS, VFP and debug register state inherited from
 * the previous image.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
{
	struct trapframe *tf = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	memset(tf, 0, sizeof(struct trapframe));

	/* x0 carries the (unaligned) stack address to the startup code. */
	tf->tf_x[0] = stack;
	tf->tf_sp = STACKALIGN(stack);
	tf->tf_lr = imgp->entry_addr;
	tf->tf_elr = imgp->entry_addr;

	/* Reset userspace TLS pointers in the PCB and the live registers. */
	td->td_pcb->pcb_tpidr_el0 = 0;
	td->td_pcb->pcb_tpidrro_el0 = 0;
	WRITE_SPECIALREG(tpidrro_el0, 0);
	WRITE_SPECIALREG(tpidr_el0, 0);

#ifdef VFP
	vfp_reset_state(td, pcb);
#endif

	/*
	 * Clear debug register state. It is not applicable to the new process.
	 */
	bzero(&pcb->pcb_dbg_regs, sizeof(pcb->pcb_dbg_regs));
}
583 
/* Sanity check these are the same size, they will be memcpy'd to and fro */
/* (see get_mcontext/set_mcontext and fill_regs/set_regs above/below). */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);
589 
/*
 * Capture the thread's machine context (GP registers plus VFP state)
 * into *mcp for signal delivery or getcontext(2). If GET_MC_CLEAR_RET
 * is set in clear_ret, x0 and the carry flag are cleared in the saved
 * copy so the interrupted syscall appears to have returned success.
 * Always returns 0.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;

	if (clear_ret & GET_MC_CLEAR_RET) {
		mcp->mc_gpregs.gp_x[0] = 0;
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
	} else {
		mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
	}

	/* x0 was handled above; copy x1..x29 in one go. */
	memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
	    sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

	mcp->mc_gpregs.gp_sp = tf->tf_sp;
	mcp->mc_gpregs.gp_lr = tf->tf_lr;
	mcp->mc_gpregs.gp_elr = tf->tf_elr;
	get_fpcontext(td, mcp);

	return (0);
}
613 
/*
 * Install a machine context from *mcp into the thread (sigreturn(2) /
 * setcontext(2) path). The supplied SPSR is rejected with EINVAL unless
 * it describes 64-bit EL0 with the same DAIF mask bits as the current
 * frame, preventing userspace from escalating its execution state.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;
	uint32_t spsr;

	spsr = mcp->mc_gpregs.gp_spsr;
	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
	    (spsr & PSR_AARCH32) != 0 ||
	    (spsr & PSR_DAIF) != (td->td_frame->tf_spsr & PSR_DAIF))
		return (EINVAL);

	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

	tf->tf_sp = mcp->mc_gpregs.gp_sp;
	tf->tf_lr = mcp->mc_gpregs.gp_lr;
	tf->tf_elr = mcp->mc_gpregs.gp_elr;
	tf->tf_spsr = mcp->mc_gpregs.gp_spsr;
	set_fpcontext(td, mcp);

	return (0);
}
636 
/*
 * Copy the current VFP state into mcp->mc_fpregs and mark it valid with
 * _MC_FP_VALID. Runs in a critical section so the VFP state cannot be
 * switched out from under us. If the thread has not used the VFP,
 * mc_fpregs is left untouched and _MC_FP_VALID is not set. No-op
 * without the VFP option.
 */
static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
	struct pcb *curpcb;

	critical_enter();

	curpcb = curthread->td_pcb;

	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		vfp_save_state(td, curpcb);

		KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
		    ("Called get_fpcontext while the kernel is using the VFP"));
		KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
		    ("Non-userspace FPU flags set in get_fpcontext"));
		memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs,
		    sizeof(mcp->mc_fpregs.fp_q));
		mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr;
		mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr;
		mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
		mcp->mc_flags |= _MC_FP_VALID;
	}

	critical_exit();
#endif
}
669 
/*
 * Install the VFP state from mcp->mc_fpregs into the current thread's
 * PCB, discarding whatever live VFP state the thread had. Only acts if
 * the context was marked valid with _MC_FP_VALID; runs in a critical
 * section. No-op without the VFP option.
 */
static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
	struct pcb *curpcb;

	critical_enter();

	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
		curpcb = curthread->td_pcb;

		/*
		 * Discard any vfp state for the current thread, we
		 * are about to override it.
		 */
		vfp_discard(td);

		KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
		    ("Called set_fpcontext while the kernel is using the VFP"));
		memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q,
		    sizeof(mcp->mc_fpregs.fp_q));
		curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
		curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
		/* Only user-visible flags may come from userspace. */
		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
	}

	critical_exit();
#endif
}
699 
/*
 * Idle loop body: if nothing is runnable, wait for an interrupt with
 * WFI. The dsb before wfi orders prior memory accesses before the CPU
 * enters the low-power state. Interrupts are held off via
 * spinlock_enter() so a wakeup between the runnable check and wfi is
 * not lost (the pending interrupt terminates the wfi).
 */
void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		__asm __volatile(
		    "dsb sy \n"
		    "wfi    \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}
715 
/*
 * Final halt path: disable interrupts and spin in a low-power
 * wait-for-interrupt loop forever. Never returns.
 */
void
cpu_halt(void)
{
	/* We should have shutdown by now, if not enter a low power sleep */
	intr_disable();
	for (;;)
		__asm __volatile("wfi");
}
726 
/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* ARM64TODO TBD -- intentionally a no-op for now. */
}
737 
738 /* Get current clock frequency for the given CPU ID. */
739 int
740 cpu_est_clockrate(int cpu_id, uint64_t *rate)
741 {
742 	struct pcpu *pc;
743 
744 	pc = pcpu_find(cpu_id);
745 	if (pc == NULL || rate == NULL)
746 		return (EINVAL);
747 
748 	if (pc->pc_clock == 0)
749 		return (EOPNOTSUPP);
750 
751 	*rate = pc->pc_clock;
752 	return (0);
753 }
754 
755 void
756 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
757 {
758 
759 	pcpu->pc_acpi_id = 0xffffffff;
760 	pcpu->pc_mpidr = 0xffffffff;
761 }
762 
/*
 * Enter a spinlock section: on first entry disable interrupts (saving
 * the DAIF state for spinlock_exit) and enter a critical section;
 * nested calls only bump the count. Interrupts must be disabled before
 * the count is set so the section is never observed with interrupts
 * enabled.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		daif = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_daif = daif;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}
778 
/*
 * Leave a spinlock section: decrement the nesting count and, when the
 * outermost section is exited, leave the critical section and restore
 * the DAIF state saved by spinlock_enter(). The saved state is read
 * before the count is dropped so it cannot be overwritten by a nested
 * re-entry.
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	daif = td->td_md.md_saved_daif;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();
		intr_restore(daif);
	}
}
793 
794 #ifndef	_SYS_SYSPROTO_H_
795 struct sigreturn_args {
796 	ucontext_t *ucp;
797 };
798 #endif
799 
/*
 * sigreturn(2): restore the machine context and signal mask that were
 * saved on the user stack by sendsig(). Returns EFAULT if the context
 * cannot be copied in, EINVAL (via set_mcontext) if it is malformed,
 * and EJUSTRETURN on success so the restored registers are preserved.
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	int error;

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}
818 
819 /*
820  * Construct a PCB from a trapframe. This is called from kdb_trap() where
821  * we want to start a backtrace from the function that caused us to enter
822  * the debugger. We have the context in the trapframe, but base the trace
823  * on the PCB. The PCB doesn't have to be perfect, as long as it contains
824  * enough for a backtrace.
825  */
826 void
827 makectx(struct trapframe *tf, struct pcb *pcb)
828 {
829 	int i;
830 
831 	for (i = 0; i < nitems(pcb->pcb_x); i++)
832 		pcb->pcb_x[i] = tf->tf_x[i];
833 
834 	/* NB: pcb_lr is the PC, see PC_REGS() in db_machdep.h */
835 	pcb->pcb_lr = tf->tf_elr;
836 	pcb->pcb_sp = tf->tf_sp;
837 }
838 
/*
 * Deliver a signal to the current thread: build a sigframe (machine
 * context, siginfo and signal mask) on the user stack — the alternate
 * signal stack if configured and applicable — copy it out, and redirect
 * the thread to the signal handler with the signal trampoline as the
 * return address. Called with the proc lock and ps_mtx held; both are
 * dropped around the copyout and reacquired before returning. If the
 * copyout fails, the process is killed with SIGILL.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	struct sysentvec *sysent;
	int onstack, sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		/* Start at the top of the alternate signal stack. */
		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else {
		fp = (struct sigframe *)td->td_frame->tf_sp;
	}

	/* Make room, keeping the stack aligned */
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Fill in the frame to copy out */
	bzero(&frame, sizeof(frame));
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ?
	    (onstack ? SS_ONSTACK : 0) : SS_DISABLE;
	/* Drop the locks across the sleepable copyout. */
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/* Handler arguments: signal number, siginfo and ucontext. */
	tf->tf_x[0]= sig;
	tf->tf_x[1] = (register_t)&fp->sf_si;
	tf->tf_x[2] = (register_t)&fp->sf_uc;

	tf->tf_elr = (register_t)catcher;
	tf->tf_sp = (register_t)fp;
	sysent = p->p_sysent;
	/* Return through the signal trampoline (sigcode). */
	if (sysent->sv_sigcode_base != 0)
		tf->tf_lr = (register_t)sysent->sv_sigcode_base;
	else
		tf->tf_lr = (register_t)(sysent->sv_psstrings -
		    *(sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
918 
/*
 * Early initialization of proc0/thread0: link them together, carve the
 * PCB out of the top of the supplied kernel stack, reset the FP state
 * bookkeeping, attach the static proc0 trapframe and publish the PCB in
 * the boot CPU's pcpu.
 */
static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_kstack_pages = KSTACK_PAGES;
	/* The PCB sits at the very top of the kernel stack. */
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;

	/*
	 * Unmask SError exceptions. They are used to signal a RAS failure,
	 * or other hardware error.
	 */
	serror_enable();
}
941 
/*
 * Layout of a UEFI memory map descriptor as handed over by the loader.
 * NOTE(review): not referenced in the visible part of this file — the
 * code below uses struct efi_md instead; confirm whether this typedef
 * is still needed.
 */
typedef struct {
	uint32_t type;		/* EFI memory type */
	uint64_t phys_start;	/* physical start address */
	uint64_t virt_start;	/* virtual start address */
	uint64_t num_pages;	/* size in EFI pages */
	uint64_t attr;		/* attribute bitmask */
} EFI_MEMORY_DESCRIPTOR;
949 
/* Callback type invoked once per EFI memory map descriptor. */
typedef void (*efi_map_entry_cb)(struct efi_md *);

/*
 * Walk every descriptor in the EFI memory map that follows the map
 * header and invoke cb on it. Descriptors are stepped by the
 * firmware-reported descriptor_size, which may be larger than
 * sizeof(struct efi_md). Returns silently on a zero descriptor size.
 */
static void
foreach_efi_map_entry(struct efi_map_header *efihdr, efi_map_entry_cb cb)
{
	struct efi_md *map, *p;
	size_t efisz;
	int ndesc, i;

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	 */
	/* The descriptors start at the next 16-byte boundary. */
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	if (efihdr->descriptor_size == 0)
		return;
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		cb(p);
	}
}
975 
/*
 * Per-descriptor callback: exclude from the physical memory allocator
 * any region whose EFI type is not known to be usable RAM.
 */
static void
exclude_efi_map_entry(struct efi_md *p)
{

	switch (p->md_type) {
	case EFI_MD_TYPE_CODE:
	case EFI_MD_TYPE_DATA:
	case EFI_MD_TYPE_BS_CODE:
	case EFI_MD_TYPE_BS_DATA:
	case EFI_MD_TYPE_FREE:
		/*
		 * We're allowed to use any entry with these types.
		 */
		break;
	default:
		physmem_exclude_region(p->md_phys, p->md_pages * PAGE_SIZE,
		    EXFLAG_NOALLOC);
	}
}
995 
/* Exclude every unusable region in the EFI map from the allocator. */
static void
exclude_efi_map_entries(struct efi_map_header *efihdr)
{

	foreach_efi_map_entry(efihdr, exclude_efi_map_entry);
}
1002 
/*
 * Per-descriptor callback: register usable EFI regions as hardware
 * memory with the physmem code.
 */
static void
add_efi_map_entry(struct efi_md *p)
{

	switch (p->md_type) {
	case EFI_MD_TYPE_RT_DATA:
		/*
		 * Runtime data will be excluded after the DMAP
		 * region is created to stop it from being added
		 * to phys_avail.
		 */
		/* FALLTHROUGH */
	case EFI_MD_TYPE_CODE:
	case EFI_MD_TYPE_DATA:
	case EFI_MD_TYPE_BS_CODE:
	case EFI_MD_TYPE_BS_DATA:
	case EFI_MD_TYPE_FREE:
		/*
		 * We're allowed to use any entry with these types.
		 */
		physmem_hardware_region(p->md_phys,
		    p->md_pages * PAGE_SIZE);
		break;
	}
}
1027 
/* Register every usable region in the EFI map with the physmem code. */
static void
add_efi_map_entries(struct efi_map_header *efihdr)
{

	foreach_efi_map_entry(efihdr, add_efi_map_entry);
}
1034 
1035 static void
1036 print_efi_map_entry(struct efi_md *p)
1037 {
1038 	const char *type;
1039 	static const char *types[] = {
1040 		"Reserved",
1041 		"LoaderCode",
1042 		"LoaderData",
1043 		"BootServicesCode",
1044 		"BootServicesData",
1045 		"RuntimeServicesCode",
1046 		"RuntimeServicesData",
1047 		"ConventionalMemory",
1048 		"UnusableMemory",
1049 		"ACPIReclaimMemory",
1050 		"ACPIMemoryNVS",
1051 		"MemoryMappedIO",
1052 		"MemoryMappedIOPortSpace",
1053 		"PalCode",
1054 		"PersistentMemory"
1055 	};
1056 
1057 	if (p->md_type < nitems(types))
1058 		type = types[p->md_type];
1059 	else
1060 		type = "<INVALID>";
1061 	printf("%23s %012lx %012lx %08lx ", type, p->md_phys,
1062 	    p->md_virt, p->md_pages);
1063 	if (p->md_attr & EFI_MD_ATTR_UC)
1064 		printf("UC ");
1065 	if (p->md_attr & EFI_MD_ATTR_WC)
1066 		printf("WC ");
1067 	if (p->md_attr & EFI_MD_ATTR_WT)
1068 		printf("WT ");
1069 	if (p->md_attr & EFI_MD_ATTR_WB)
1070 		printf("WB ");
1071 	if (p->md_attr & EFI_MD_ATTR_UCE)
1072 		printf("UCE ");
1073 	if (p->md_attr & EFI_MD_ATTR_WP)
1074 		printf("WP ");
1075 	if (p->md_attr & EFI_MD_ATTR_RP)
1076 		printf("RP ");
1077 	if (p->md_attr & EFI_MD_ATTR_XP)
1078 		printf("XP ");
1079 	if (p->md_attr & EFI_MD_ATTR_NV)
1080 		printf("NV ");
1081 	if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
1082 		printf("MORE_RELIABLE ");
1083 	if (p->md_attr & EFI_MD_ATTR_RO)
1084 		printf("RO ");
1085 	if (p->md_attr & EFI_MD_ATTR_RT)
1086 		printf("RUNTIME");
1087 	printf("\n");
1088 }
1089 
/* Print a header line then every entry in the EFI memory map. */
static void
print_efi_map_entries(struct efi_map_header *efihdr)
{

	printf("%23s %12s %12s %8s %4s\n",
	    "Type", "Physical", "Virtual", "#Pages", "Attr");
	foreach_efi_map_entry(efihdr, print_efi_map_entry);
}
1098 
#ifdef FDT
/*
 * Locate the device tree blob — from the loader metadata or, if built
 * in, the statically embedded copy — then install and initialise the
 * FDT Open Firmware backend and parse kernel boot arguments from it.
 * Panics if a blob was found but cannot be installed/initialised;
 * returns quietly (with a diagnostic) if no blob is available.
 */
static void
try_load_dtb(caddr_t kmdp)
{
	vm_offset_t dtbp;

	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == 0)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (dtbp == (vm_offset_t)NULL) {
#ifndef TSLOG
		printf("ERROR loading DTB\n");
#endif
		return;
	}

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

	parse_fdt_bootargs();
}
#endif
1131 
/*
 * Decide whether the kernel enumerates devices via FDT or ACPI. The
 * kern.cfg.order environment variable (a comma-separated preference
 * list of "acpi"/"fdt") is honoured first; otherwise FDT is preferred
 * when a device tree is present, then ACPI. Returns false when the
 * user supplied an order that could not be satisfied (the caller is
 * expected to treat the fallback choice as an error after console
 * setup).
 */
static bool
bus_probe(void)
{
	bool has_acpi, has_fdt;
	char *order, *env;

	has_acpi = has_fdt = false;

#ifdef FDT
	has_fdt = (OF_peer(0) != 0);
#endif
#ifdef DEV_ACPI
	has_acpi = (AcpiOsGetRootPointer() != 0);
#endif

	env = kern_getenv("kern.cfg.order");
	if (env != NULL) {
		/* Walk the comma-separated list, first match wins. */
		order = env;
		while (order != NULL) {
			if (has_acpi &&
			    strncmp(order, "acpi", 4) == 0 &&
			    (order[4] == ',' || order[4] == '\0')) {
				arm64_bus_method = ARM64_BUS_ACPI;
				break;
			}
			if (has_fdt &&
			    strncmp(order, "fdt", 3) == 0 &&
			    (order[3] == ',' || order[3] == '\0')) {
				arm64_bus_method = ARM64_BUS_FDT;
				break;
			}
			order = strchr(order, ',');
		}
		freeenv(env);

		/* If we set the bus method it is valid */
		if (arm64_bus_method != ARM64_BUS_NONE)
			return (true);
	}
	/* If no order or an invalid order was set use the default */
	if (arm64_bus_method == ARM64_BUS_NONE) {
		if (has_fdt)
			arm64_bus_method = ARM64_BUS_FDT;
		else if (has_acpi)
			arm64_bus_method = ARM64_BUS_ACPI;
	}

	/*
	 * If no option was set the default is valid, otherwise we are
	 * setting one to get cninit() working, then calling panic to tell
	 * the user about the invalid bus setup.
	 */
	return (env == NULL);
}
1186 
/*
 * Identify the cache geometry and configure the "dc zva" based page
 * zeroing path: if DCZID_EL0 reports that DC ZVA is permitted, record
 * the zeroing block size and switch pagezero to the cache-assisted
 * implementation.
 */
static void
cache_setup(void)
{
	int dczva_line_shift;
	uint32_t dczid_el0;

	identify_cache(READ_SPECIALREG(ctr_el0));

	dczid_el0 = READ_SPECIALREG(dczid_el0);

	/* Check if dc zva is not prohibited */
	if (dczid_el0 & DCZID_DZP)
		dczva_line_size = 0;
	else {
		/*
		 * NOTE(review): the BS field appears to encode the block
		 * size in 4-byte words (log2), hence sizeof(int) here —
		 * confirm against the Arm ARM description of DCZID_EL0.
		 */
		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
		dczva_line_size = sizeof(int) << dczva_line_shift;

		/* Change pagezero function */
		pagezero = pagezero_cache;
	}
}
1209 
/*
 * Return the VM memory attribute (VM_MEMATTR_*) that should be used to
 * map the physical address 'pa', based on the UEFI memory map when one
 * was provided by the loader.  Falls back to WRITE_BACK when there is
 * no (usable) EFI map, and to DEVICE for addresses not covered by it.
 */
int
memory_mapping_mode(vm_paddr_t pa)
{
	struct efi_md *map, *p;
	size_t efisz;
	int ndesc, i;

	/* No EFI memory map: assume normal cacheable memory. */
	if (efihdr == NULL)
		return (VM_MEMATTR_WRITE_BACK);

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	 */
	/* Descriptors follow the header, 16-byte aligned. */
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	/* Guard against a malformed header before dividing. */
	if (efihdr->descriptor_size == 0)
		return (VM_MEMATTR_WRITE_BACK);
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	/*
	 * Walk the descriptors for the one covering 'pa'.  The attribute
	 * checks below are order-sensitive: I/O types win outright, then
	 * write-back (or RECLAIM), write-through, and write-combining.
	 */
	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		if (pa < p->md_phys ||
		    pa >= p->md_phys + p->md_pages * EFI_PAGE_SIZE)
			continue;
		if (p->md_type == EFI_MD_TYPE_IOMEM ||
		    p->md_type == EFI_MD_TYPE_IOPORT)
			return (VM_MEMATTR_DEVICE);
		else if ((p->md_attr & EFI_MD_ATTR_WB) != 0 ||
		    p->md_type == EFI_MD_TYPE_RECLAIM)
			return (VM_MEMATTR_WRITE_BACK);
		else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
			return (VM_MEMATTR_WRITE_THROUGH);
		else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
			return (VM_MEMATTR_WRITE_COMBINING);
		break;
	}

	/* Not described by the EFI map (or no attribute matched). */
	return (VM_MEMATTR_DEVICE);
}
1251 
/*
 * Machine-dependent early boot entry point, called from locore with the
 * boot parameters assembled by the loader.  The sequence below is
 * strictly ordered: pcpu must exist before pmap_bootstrap, the console
 * cannot be initialized until devmap/bus probing is done, etc.
 */
void
initarm(struct arm64_bootparams *abp)
{
	struct efi_fb *efifb;
	struct pcpu *pcpup;
	char *env;
#ifdef FDT
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	int mem_regions_sz;
	phandle_t root;
	char dts_version[255];
#endif
	vm_offset_t lastaddr;
	caddr_t kmdp;
	bool valid;

	TSRAW(&thread0, TS_ENTER, __func__, NULL);

	/* Remember which exception level we were booted at. */
	boot_el = abp->boot_el;

	/* Parse loader or FDT boot parameters. Determine last used address. */
	lastaddr = parse_boot_param(abp);

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	identify_cpu(0);
	update_special_regs(0);

	link_elf_ireloc(kmdp);
	try_load_dtb(kmdp);

	efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);

	/* Load the physical memory ranges */
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL)
		add_efi_map_entries(efihdr);
#ifdef FDT
	else {
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
		    NULL) != 0)
			panic("Cannot get physical memory regions");
		physmem_hardware_regions(mem_regions, mem_regions_sz);
	}
	/* Keep FDT-reserved ranges out of the allocator and dumps. */
	if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0)
		physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
#endif

	/* Exclude the EFI framebuffer from our view of physical memory. */
	efifb = (struct efi_fb *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_FB);
	if (efifb != NULL)
		physmem_exclude_region(efifb->fb_addr, efifb->fb_size,
		    EXFLAG_NOALLOC);

	/* Set the pcpu data, this is needed by pmap_bootstrap */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);
	PCPU_SET(midr, get_midr());

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();
	pan_setup();

	/* Bootstrap enough of pmap to enter the kernel proper */
	pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
	    KERNBASE - abp->kern_delta, lastaddr - KERNBASE);
	/* Exclude entries needed in the DMAP region, but not phys_avail */
	if (efihdr != NULL)
		exclude_efi_map_entries(efihdr);
	physmem_init_kernel_globals();

	devmap_bootstrap(0, NULL);

	/* Pick FDT or ACPI enumeration; panic below if the choice is bad. */
	valid = bus_probe();

	cninit();
	set_ttbr0(abp->kern_ttbr0);
	cpu_tlb_flushID();

	/* Deferred until after cninit() so the message is visible. */
	if (!valid)
		panic("Invalid bus configuration: %s",
		    kern_getenv("kern.cfg.order"));

	/*
	 * Dump the boot metadata. We have to wait for cninit() since console
	 * output is required. If it's grossly incorrect the kernel will never
	 * make it this far.
	 */
	if (getenv_is_true("debug.dump_modinfo_at_boot"))
		preload_dump();

	init_proc0(abp->kern_stack);
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);

	dbg_init();
	kdb_init();
	pan_enable();

	kcsan_cpu_init(0);

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

#ifdef FDT
	/* Warn when the installed DTB does not match what the kernel expects. */
	if (arm64_bus_method == ARM64_BUS_FDT) {
		root = OF_finddevice("/");
		if (OF_getprop(root, "freebsd,dts-version", dts_version, sizeof(dts_version)) > 0) {
			if (strcmp(LINUX_DTS_VERSION, dts_version) != 0)
				printf("WARNING: DTB version is %s while kernel expects %s, "
				    "please update the DTB in the ESP\n",
				    dts_version,
				    LINUX_DTS_VERSION);
		} else {
			printf("WARNING: Cannot find freebsd,dts-version property, "
			    "cannot check DTB compliance\n");
		}
	}
#endif

	if (boothowto & RB_VERBOSE) {
		if (efihdr != NULL)
			print_efi_map_entries(efihdr);
		physmem_print_tables();
	}

	early_boot = 0;

	TSEXIT();
}
1403 
/*
 * Prepare the self-hosted debug facility.  The OS lock must be cleared
 * first: while it is set, writes to the debug registers are ignored.
 */
void
dbg_init(void)
{

	/* Clear OS lock */
	WRITE_SPECIALREG(oslar_el1, 0);

	/* This permits DDB to use debug registers for watchpoints. */
	dbg_monitor_init();

	/* TODO: Eventually will need to initialize debug registers here. */
}
1416 
#ifdef DDB
#include <ddb/ddb.h>

/*
 * DDB "show specialregs" command: dump the AArch64 system registers that
 * are readable at the current exception level, one per line.
 */
DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
/* Print one named system register as a 64-bit hex value. */
#define	PRINT_REG(reg)	\
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

	PRINT_REG(actlr_el1);
	PRINT_REG(afsr0_el1);
	PRINT_REG(afsr1_el1);
	PRINT_REG(aidr_el1);
	PRINT_REG(amair_el1);
	PRINT_REG(ccsidr_el1);
	PRINT_REG(clidr_el1);
	PRINT_REG(contextidr_el1);
	PRINT_REG(cpacr_el1);
	PRINT_REG(csselr_el1);
	PRINT_REG(ctr_el0);
	PRINT_REG(currentel);
	PRINT_REG(daif);
	PRINT_REG(dczid_el0);
	PRINT_REG(elr_el1);
	PRINT_REG(esr_el1);
	PRINT_REG(far_el1);
#if 0
	/* ARM64TODO: Enable VFP before reading floating-point registers */
	PRINT_REG(fpcr);
	PRINT_REG(fpsr);
#endif
	PRINT_REG(id_aa64afr0_el1);
	PRINT_REG(id_aa64afr1_el1);
	PRINT_REG(id_aa64dfr0_el1);
	PRINT_REG(id_aa64dfr1_el1);
	PRINT_REG(id_aa64isar0_el1);
	PRINT_REG(id_aa64isar1_el1);
	PRINT_REG(id_aa64pfr0_el1);
	PRINT_REG(id_aa64pfr1_el1);
	PRINT_REG(id_afr0_el1);
	PRINT_REG(id_dfr0_el1);
	PRINT_REG(id_isar0_el1);
	PRINT_REG(id_isar1_el1);
	PRINT_REG(id_isar2_el1);
	PRINT_REG(id_isar3_el1);
	PRINT_REG(id_isar4_el1);
	PRINT_REG(id_isar5_el1);
	PRINT_REG(id_mmfr0_el1);
	PRINT_REG(id_mmfr1_el1);
	PRINT_REG(id_mmfr2_el1);
	PRINT_REG(id_mmfr3_el1);
#if 0
	/* Missing from llvm */
	PRINT_REG(id_mmfr4_el1);
#endif
	PRINT_REG(id_pfr0_el1);
	PRINT_REG(id_pfr1_el1);
	PRINT_REG(isr_el1);
	PRINT_REG(mair_el1);
	PRINT_REG(midr_el1);
	PRINT_REG(mpidr_el1);
	PRINT_REG(mvfr0_el1);
	PRINT_REG(mvfr1_el1);
	PRINT_REG(mvfr2_el1);
	PRINT_REG(revidr_el1);
	PRINT_REG(sctlr_el1);
	PRINT_REG(sp_el0);
	PRINT_REG(spsel);
	PRINT_REG(spsr_el1);
	PRINT_REG(tcr_el1);
	PRINT_REG(tpidr_el0);
	PRINT_REG(tpidr_el1);
	PRINT_REG(tpidrro_el0);
	PRINT_REG(ttbr0_el1);
	PRINT_REG(ttbr1_el1);
	PRINT_REG(vbar_el1);
#undef PRINT_REG
}
1494 
1495 DB_SHOW_COMMAND(vtop, db_show_vtop)
1496 {
1497 	uint64_t phys;
1498 
1499 	if (have_addr) {
1500 		phys = arm64_address_translate_s1e1r(addr);
1501 		db_printf("EL1 physical address reg (read):  0x%016lx\n", phys);
1502 		phys = arm64_address_translate_s1e1w(addr);
1503 		db_printf("EL1 physical address reg (write): 0x%016lx\n", phys);
1504 		phys = arm64_address_translate_s1e0r(addr);
1505 		db_printf("EL0 physical address reg (read):  0x%016lx\n", phys);
1506 		phys = arm64_address_translate_s1e0w(addr);
1507 		db_printf("EL0 physical address reg (write): 0x%016lx\n", phys);
1508 	} else
1509 		db_printf("show vtop <virt_addr>\n");
1510 }
1511 #endif
1512