/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_platform.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/efi.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/debug_monitor.h>
#include <machine/kdb.h>
#include <machine/devmap.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/vmparam.h>

#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif
struct pcpu __pcpu[MAXCPU];

static struct trapframe proc0_tf;

vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];

int early_boot = 1;
int cold = 1;
long realmem = 0;
long Maxmem = 0;

#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))
vm_paddr_t physmap[PHYSMAP_SIZE];
u_int physmap_idx;

struct kva_md_info kmi;

int64_t dcache_line_size;	/* The minimum D cache line size */
int64_t icache_line_size;	/* The minimum I cache line size */
int64_t idcache_line_size;	/* The minimum cache line size */

static void
cpu_startup(void *dummy)
{

	identify_cpu();

	vm_ksubmap_init(&kmi);
	bufinit();
	vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

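/* Straightforward byte-at-a-time implementation. */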
void
bzero(void *buf, size_t len)
{
	uint8_t *p;

	p = buf;
	while (len-- > 0)
		*p++ = 0;
}

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	regs->sp = frame->tf_sp;
	regs->lr = frame->tf_lr;
	regs->elr = frame->tf_elr;
	regs->spsr = frame->tf_spsr;

	memcpy(regs->x, frame->tf_x, sizeof(regs->x));

	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	frame->tf_sp = regs->sp;
	frame->tf_lr = regs->lr;
	frame->tf_elr = regs->elr;
	frame->tf_spsr = regs->spsr;

	memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));

	return (0);
}

int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		vfp_save_state(td, pcb);

		memcpy(regs->fp_q, pcb->pcb_vfp, sizeof(regs->fp_q));
		regs->fp_cr = pcb->pcb_fpcr;
		regs->fp_sr = pcb->pcb_fpsr;
	} else
#endif
		memset(regs->fp_q, 0, sizeof(regs->fp_q));
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	memcpy(pcb->pcb_vfp, regs->fp_q, sizeof(regs->fp_q));
	pcb->pcb_fpcr = regs->fp_cr;
	pcb->pcb_fpsr = regs->fp_sr;
#endif
	return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

	panic("ARM64TODO: fill_dbregs");
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

	panic("ARM64TODO: set_dbregs");
}

int
ptrace_set_pc(struct thread *td, u_long addr)
{

	panic("ARM64TODO: ptrace_set_pc");
	return (0);
}

int
ptrace_single_step(struct thread *td)
{

	/* ARM64TODO */
	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

	/* ARM64TODO */
	return (0);
}

void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf = td->td_frame;

	memset(tf, 0, sizeof(struct trapframe));

	/*
	 * We need to set x0 for init as it doesn't call
	 * cpu_set_syscall_retval to copy the value. We also
	 * need to set td_retval for the cases where we do.
	 */
	tf->tf_x[0] = td->td_retval[0] = stack;
	tf->tf_sp = STACKALIGN(stack);
	tf->tf_lr = imgp->entry_addr;
	tf->tf_elr = imgp->entry_addr;
}

/* Sanity check these are the same size, they will be memcpy'd to and fro */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);

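/*
 * With GET_MC_CLEAR_RET the caller wants the context a successful syscall
 * would return to userland with: x0 (the return value) is zeroed and the
 * carry flag in spsr, which the syscall path uses to flag an error, is
 * cleared.
 */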
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;

	if (clear_ret & GET_MC_CLEAR_RET) {
		mcp->mc_gpregs.gp_x[0] = 0;
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
	} else {
		mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
	}

	memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
	    sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

	mcp->mc_gpregs.gp_sp = tf->tf_sp;
	mcp->mc_gpregs.gp_lr = tf->tf_lr;
	mcp->mc_gpregs.gp_elr = tf->tf_elr;

	return (0);
}

int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;

	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

	tf->tf_sp = mcp->mc_gpregs.gp_sp;
	tf->tf_lr = mcp->mc_gpregs.gp_lr;
	tf->tf_elr = mcp->mc_gpregs.gp_elr;
	tf->tf_spsr = mcp->mc_gpregs.gp_spsr;

	return (0);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
	struct pcb *curpcb;

	critical_enter();

	curpcb = curthread->td_pcb;

	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		vfp_save_state(td, curpcb);

		/*
		 * Copy only the q registers; sizing the copy off the
		 * whole mc_fpregs structure would overrun pcb_vfp.
		 */
		memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_vfp,
		    sizeof(mcp->mc_fpregs.fp_q));
		mcp->mc_fpregs.fp_cr = curpcb->pcb_fpcr;
		mcp->mc_fpregs.fp_sr = curpcb->pcb_fpsr;
		mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
		mcp->mc_flags |= _MC_FP_VALID;
	}

	critical_exit();
#endif
}

static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
	struct pcb *curpcb;

	critical_enter();

	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
		curpcb = curthread->td_pcb;

		/*
		 * Discard any vfp state for the current thread, we
		 * are about to override it.
		 */
		vfp_discard(td);

		memcpy(curpcb->pcb_vfp, mcp->mc_fpregs.fp_q,
		    sizeof(mcp->mc_fpregs.fp_q));
		curpcb->pcb_fpcr = mcp->mc_fpregs.fp_cr;
		curpcb->pcb_fpsr = mcp->mc_fpregs.fp_sr;
		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags;
	}

	critical_exit();
#endif
}

void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		/*
		 * Complete outstanding memory accesses, then wait for
		 * an interrupt.
		 */
		__asm __volatile(
		    "dsb sy \n"
		    "wfi    \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}

void
cpu_halt(void)
{

	/* We should have shutdown by now, if not enter a low power sleep */
	intr_disable();
	while (1) {
		__asm __volatile("wfi");
	}
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* ARM64TODO TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	panic("ARM64TODO: cpu_est_clockrate");
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
}

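/*
 * Spinlock sections run with interrupts disabled.  md_spinlock_count
 * tracks nesting so that only the outermost spinlock_exit() restores
 * the DAIF state captured by the outermost spinlock_enter().
 */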
void
spinlock_enter(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		daif = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_daif = daif;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	critical_exit();
	daif = td->td_md.md_saved_daif;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(daif);
}

#ifndef	_SYS_SYSPROTO_H_
struct sigreturn_args {
	ucontext_t *sigcntxp;
};
#endif

int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	uint32_t spsr;

	if (uap == NULL)
		return (EFAULT);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	/*
	 * Refuse to return to anything but EL0t with no exceptions
	 * masked; the saved processor state must not have been
	 * tampered with.
	 */
	spsr = uc.uc_mcontext.mc_gpregs.gp_spsr;
	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
	    (spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
		return (EINVAL);

	set_mcontext(td, &uc.uc_mcontext);
	set_fpcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
	int i;

	for (i = 0; i < PCB_LR; i++)
		pcb->pcb_x[i] = tf->tf_x[i];

	pcb->pcb_x[PCB_LR] = tf->tf_lr;
	pcb->pcb_pc = tf->tf_elr;
	pcb->pcb_sp = tf->tf_sp;
}

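/*
 * Deliver a signal: build a sigframe (machine and FP context, siginfo and
 * signal mask) on the user stack, then rewrite the trapframe so the thread
 * resumes in the handler as catcher(sig, &fp->sf_si, &fp->sf_uc), with the
 * link register aimed at the signal trampoline for the eventual sigreturn.
 */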
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	int code, onstack, sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else {
		fp = (struct sigframe *)td->td_frame->tf_sp;
	}

	/* Make room, keeping the stack aligned */
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Fill in the frame to copy out */
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	get_fpcontext(td, &frame.sf_uc.uc_mcontext);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	/*
	 * Copy the stack description first; setting ss_flags before the
	 * structure copy would have the flags overwritten.
	 */
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
	    ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	tf->tf_x[0] = sig;
	tf->tf_x[1] = (register_t)&fp->sf_si;
	tf->tf_x[2] = (register_t)&fp->sf_uc;

	tf->tf_elr = (register_t)catcher;
	tf->tf_sp = (register_t)fp;
	tf->tf_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}

typedef struct {
	uint32_t type;
	uint64_t phys_start;
	uint64_t virt_start;
	uint64_t num_pages;
	uint64_t attr;
} EFI_MEMORY_DESCRIPTOR;

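/*
 * physmap[] holds {start, end} pairs of physical addresses, kept sorted
 * and non-overlapping, e.g. (addresses illustrative only):
 *   physmap[0] = 0x80000000  physmap[1] = 0x88000000
 *   physmap[2] = 0x90000000  physmap[3] = 0x98000000
 * add_physmap_entry() merges a new range into this table, coalescing
 * with an adjacent entry where possible.
 */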
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	u_int i, insert_idx, _physmap_idx;

	_physmap_idx = *physmap_idxp;

	if (length == 0)
		return (1);

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 */
	insert_idx = _physmap_idx;
	for (i = 0; i <= _physmap_idx; i += 2) {
		if (base < physmap[i + 1]) {
			if (base + length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= _physmap_idx &&
	    base + length == physmap[insert_idx]) {
		physmap[insert_idx] = base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += length;
		return (1);
	}

	_physmap_idx += 2;
	*physmap_idxp = _physmap_idx;
	if (_physmap_idx == PHYSMAP_SIZE) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = _physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = base;
	physmap[insert_idx + 1] = base + length;
	return (1);
}

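/*
 * Step to the next EFI memory descriptor.  The firmware reports its own
 * descriptor size, which may be larger than sizeof(struct efi_md), so
 * advance by the reported size rather than the structure size.
 */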
#define efi_next_descriptor(ptr, size) \
	((struct efi_md *)(((uint8_t *) ptr) + size))

static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	struct efi_md *map, *p;
	const char *type;
	size_t efisz;
	int ndesc, i;

	static const char *types[] = {
		"Reserved",
		"LoaderCode",
		"LoaderData",
		"BootServicesCode",
		"BootServicesData",
		"RuntimeServicesCode",
		"RuntimeServicesData",
		"ConventionalMemory",
		"UnusableMemory",
		"ACPIReclaimMemory",
		"ACPIMemoryNVS",
		"MemoryMappedIO",
		"MemoryMappedIOPortSpace",
		"PalCode"
	};

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	 */
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	if (efihdr->descriptor_size == 0)
		return;
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	if (boothowto & RB_VERBOSE)
		printf("%23s %12s %12s %8s %4s\n",
		    "Type", "Physical", "Virtual", "#Pages", "Attr");

	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		if (boothowto & RB_VERBOSE) {
			if (p->md_type <= EFI_MD_TYPE_PALCODE)
				type = types[p->md_type];
			else
				type = "<INVALID>";
			printf("%23s %012lx %12p %08lx ", type, p->md_phys,
			    p->md_virt, p->md_pages);
			if (p->md_attr & EFI_MD_ATTR_UC)
				printf("UC ");
			if (p->md_attr & EFI_MD_ATTR_WC)
				printf("WC ");
			if (p->md_attr & EFI_MD_ATTR_WT)
				printf("WT ");
			if (p->md_attr & EFI_MD_ATTR_WB)
				printf("WB ");
			if (p->md_attr & EFI_MD_ATTR_UCE)
				printf("UCE ");
			if (p->md_attr & EFI_MD_ATTR_WP)
				printf("WP ");
			if (p->md_attr & EFI_MD_ATTR_RP)
				printf("RP ");
			if (p->md_attr & EFI_MD_ATTR_XP)
				printf("XP ");
			if (p->md_attr & EFI_MD_ATTR_RT)
				printf("RUNTIME");
			printf("\n");
		}

		switch (p->md_type) {
		case EFI_MD_TYPE_CODE:
		case EFI_MD_TYPE_DATA:
		case EFI_MD_TYPE_BS_CODE:
		case EFI_MD_TYPE_BS_DATA:
		case EFI_MD_TYPE_FREE:
			/*
			 * We're allowed to use any entry with these types.
			 */
			break;
		default:
			continue;
		}

		if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
		    physmap, physmap_idxp))
			break;
	}
}

#ifdef FDT
static void
try_load_dtb(caddr_t kmdp)
{
	vm_offset_t dtbp;

	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
	if (dtbp == (vm_offset_t)NULL) {
		printf("ERROR loading DTB\n");
		return;
	}

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");
}
#endif

static void
cache_setup(void)
{
	int dcache_line_shift, icache_line_shift;
	uint32_t ctr_el0;

	ctr_el0 = READ_SPECIALREG(ctr_el0);

	/* Read the log2 words in each D cache line */
	dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
	/* Get the D cache line size */
	dcache_line_size = sizeof(int) << dcache_line_shift;
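	/*
	 * Worked example (illustrative values): a DminLine field of 4
	 * means lines of 2^4 = 16 words, i.e. 16 * sizeof(int) = 64 bytes.
	 */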

	/* And the same for the I cache */
	icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
	icache_line_size = sizeof(int) << icache_line_shift;

	idcache_line_size = MIN(dcache_line_size, icache_line_size);
}

void
initarm(struct arm64_bootparams *abp)
{
	struct efi_map_header *efihdr;
	struct pcpu *pcpup;
	vm_offset_t lastaddr;
	caddr_t kmdp;
	vm_paddr_t mem_len;
	int i;

	/* Set the module data location */
	preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);

#ifdef FDT
	try_load_dtb(kmdp);
#endif

	/* Find the address to start allocating from */
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);

	/* Load the physical memory ranges */
	physmap_idx = 0;
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	add_efi_map_entries(efihdr, physmap, &physmap_idx);

	/* Build the dump map from physmap and total up the memory */
	mem_len = 0;
	for (i = 0; i < physmap_idx; i += 2) {
		dump_avail[i] = physmap[i];
		dump_avail[i + 1] = physmap[i + 1];
		mem_len += physmap[i + 1] - physmap[i];
	}
	dump_avail[i] = 0;
	dump_avail[i + 1] = 0;

	/* Set the pcpu data, this is needed by pmap_bootstrap */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();

	/* Bootstrap enough of pmap to enter the kernel proper */
	pmap_bootstrap(abp->kern_l1pt, KERNBASE - abp->kern_delta,
	    lastaddr - KERNBASE);

	arm_devmap_bootstrap(0, NULL);

	cninit();

	init_proc0(abp->kern_stack);
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);

	dbg_monitor_init();
	kdb_init();

	early_boot = 0;
}

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
#define	PRINT_REG(reg)	\
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

	PRINT_REG(actlr_el1);
	PRINT_REG(afsr0_el1);
	PRINT_REG(afsr1_el1);
	PRINT_REG(aidr_el1);
	PRINT_REG(amair_el1);
	PRINT_REG(ccsidr_el1);
	PRINT_REG(clidr_el1);
	PRINT_REG(contextidr_el1);
	PRINT_REG(cpacr_el1);
	PRINT_REG(csselr_el1);
	PRINT_REG(ctr_el0);
	PRINT_REG(currentel);
	PRINT_REG(daif);
	PRINT_REG(dczid_el0);
	PRINT_REG(elr_el1);
	PRINT_REG(esr_el1);
	PRINT_REG(far_el1);
#if 0
	/* ARM64TODO: Enable VFP before reading floating-point registers */
	PRINT_REG(fpcr);
	PRINT_REG(fpsr);
#endif
	PRINT_REG(id_aa64afr0_el1);
	PRINT_REG(id_aa64afr1_el1);
	PRINT_REG(id_aa64dfr0_el1);
	PRINT_REG(id_aa64dfr1_el1);
	PRINT_REG(id_aa64isar0_el1);
	PRINT_REG(id_aa64isar1_el1);
	PRINT_REG(id_aa64pfr0_el1);
	PRINT_REG(id_aa64pfr1_el1);
	PRINT_REG(id_afr0_el1);
	PRINT_REG(id_dfr0_el1);
	PRINT_REG(id_isar0_el1);
	PRINT_REG(id_isar1_el1);
	PRINT_REG(id_isar2_el1);
	PRINT_REG(id_isar3_el1);
	PRINT_REG(id_isar4_el1);
	PRINT_REG(id_isar5_el1);
	PRINT_REG(id_mmfr0_el1);
	PRINT_REG(id_mmfr1_el1);
	PRINT_REG(id_mmfr2_el1);
	PRINT_REG(id_mmfr3_el1);
#if 0
	/* Missing from llvm */
	PRINT_REG(id_mmfr4_el1);
#endif
	PRINT_REG(id_pfr0_el1);
	PRINT_REG(id_pfr1_el1);
	PRINT_REG(isr_el1);
	PRINT_REG(mair_el1);
	PRINT_REG(midr_el1);
	PRINT_REG(mpidr_el1);
	PRINT_REG(mvfr0_el1);
	PRINT_REG(mvfr1_el1);
	PRINT_REG(mvfr2_el1);
	PRINT_REG(revidr_el1);
	PRINT_REG(sctlr_el1);
	PRINT_REG(sp_el0);
	PRINT_REG(spsel);
	PRINT_REG(spsr_el1);
	PRINT_REG(tcr_el1);
	PRINT_REG(tpidr_el0);
	PRINT_REG(tpidr_el1);
	PRINT_REG(tpidrro_el0);
	PRINT_REG(ttbr0_el1);
	PRINT_REG(ttbr1_el1);
	PRINT_REG(vbar_el1);
#undef PRINT_REG
}

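/*
 * Translate a virtual address via a stage 1, EL1 read (AT S1E1R); the
 * value printed is the raw PAR_EL1 result, which holds the physical
 * address on success or fault status otherwise.
 */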
DB_SHOW_COMMAND(vtop, db_show_vtop)
{
	uint64_t phys;

	if (have_addr) {
		phys = arm64_address_translate_s1e1r(addr);
		db_printf("Physical address reg: 0x%016lx\n", phys);
	} else
		db_printf("show vtop <virt_addr>\n");
}
#endif
967