xref: /freebsd/sys/arm64/arm64/machdep.c (revision acc1a9ef)
1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include "opt_platform.h"
29 #include "opt_ddb.h"
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/buf.h>
37 #include <sys/bus.h>
38 #include <sys/cons.h>
39 #include <sys/cpu.h>
40 #include <sys/efi.h>
41 #include <sys/exec.h>
42 #include <sys/imgact.h>
43 #include <sys/kdb.h>
44 #include <sys/kernel.h>
45 #include <sys/limits.h>
46 #include <sys/linker.h>
47 #include <sys/msgbuf.h>
48 #include <sys/pcpu.h>
49 #include <sys/proc.h>
50 #include <sys/ptrace.h>
51 #include <sys/reboot.h>
52 #include <sys/rwlock.h>
53 #include <sys/sched.h>
54 #include <sys/signalvar.h>
55 #include <sys/syscallsubr.h>
56 #include <sys/sysent.h>
57 #include <sys/sysproto.h>
58 #include <sys/ucontext.h>
59 #include <sys/vdso.h>
60 
61 #include <vm/vm.h>
62 #include <vm/vm_kern.h>
63 #include <vm/vm_object.h>
64 #include <vm/vm_page.h>
65 #include <vm/pmap.h>
66 #include <vm/vm_map.h>
67 #include <vm/vm_pager.h>
68 
69 #include <machine/armreg.h>
70 #include <machine/cpu.h>
71 #include <machine/debug_monitor.h>
72 #include <machine/kdb.h>
73 #include <machine/devmap.h>
74 #include <machine/machdep.h>
75 #include <machine/metadata.h>
76 #include <machine/md_var.h>
77 #include <machine/pcb.h>
78 #include <machine/reg.h>
79 #include <machine/vmparam.h>
80 
81 #ifdef VFP
82 #include <machine/vfp.h>
83 #endif
84 
85 #ifdef FDT
86 #include <dev/fdt/fdt_common.h>
87 #include <dev/ofw/openfirm.h>
88 #endif
89 
90 struct pcpu __pcpu[MAXCPU];
91 
92 static struct trapframe proc0_tf;
93 
94 vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
95 vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];
96 
97 int early_boot = 1;
98 int cold = 1;
99 long realmem = 0;
100 long Maxmem = 0;
101 
102 #define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))
103 vm_paddr_t physmap[PHYSMAP_SIZE];
104 u_int physmap_idx;
105 
106 struct kva_md_info kmi;
107 
108 int64_t dcache_line_size;	/* The minimum D cache line size */
109 int64_t icache_line_size;	/* The minimum I cache line size */
110 int64_t idcache_line_size;	/* The minimum cache line size */
111 
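/*
 * Late CPU setup, run once at SI_SUB_CPU by the SYSINIT below: identify
 * the CPU, set up the kernel virtual memory submaps, and initialize the
 * buffer cache and pager.
 */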
112 static void
113 cpu_startup(void *dummy)
114 {
115 
116 	identify_cpu();
117 
118 	vm_ksubmap_init(&kmi);
119 	bufinit();
120 	vm_pager_bufferinit();
121 }
122 
123 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
124 
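/*
 * Hook to wake a CPU sitting in cpu_idle().  Nothing special is done here
 * yet; returning 0 indicates that the CPU was not woken by this routine.
 */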
125 int
126 cpu_idle_wakeup(int cpu)
127 {
128 
129 	return (0);
130 }
131 
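/* Zero a buffer one byte at a time. */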
132 void
133 bzero(void *buf, size_t len)
134 {
135 	uint8_t *p;
136 
137 	p = buf;
138 	while (len-- > 0)
139 		*p++ = 0;
140 }
141 
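/*
 * Copy the general-purpose register state from a thread's trap frame
 * into a struct reg, e.g. for ptrace(2) or a core dump; set_regs() below
 * performs the reverse copy.
 */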
142 int
143 fill_regs(struct thread *td, struct reg *regs)
144 {
145 	struct trapframe *frame;
146 
147 	frame = td->td_frame;
148 	regs->sp = frame->tf_sp;
149 	regs->lr = frame->tf_lr;
150 	regs->elr = frame->tf_elr;
151 	regs->spsr = frame->tf_spsr;
152 
153 	memcpy(regs->x, frame->tf_x, sizeof(regs->x));
154 
155 	return (0);
156 }
157 
158 int
159 set_regs(struct thread *td, struct reg *regs)
160 {
161 	struct trapframe *frame;
162 
163 	frame = td->td_frame;
164 	frame->tf_sp = regs->sp;
165 	frame->tf_lr = regs->lr;
166 	frame->tf_elr = regs->elr;
167 	frame->tf_spsr = regs->spsr;
168 
169 	memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));
170 
171 	return (0);
172 }
173 
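/*
 * Copy a thread's floating-point (VFP) state into a struct fpreg.  If the
 * thread has been executing VFP instructions, the live state is saved to
 * the PCB first; without VFP support the registers are reported as zero.
 */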
174 int
175 fill_fpregs(struct thread *td, struct fpreg *regs)
176 {
177 #ifdef VFP
178 	struct pcb *pcb;
179 
180 	pcb = td->td_pcb;
181 	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
182 		/*
183 		 * If we have just been running VFP instructions we will
184 		 * need to save the state to memcpy it below.
185 		 */
186 		vfp_save_state(td, pcb);
187 
188 		memcpy(regs->fp_q, pcb->pcb_vfp, sizeof(regs->fp_q));
189 		regs->fp_cr = pcb->pcb_fpcr;
190 		regs->fp_sr = pcb->pcb_fpsr;
191 	} else
192 #endif
193 		memset(regs->fp_q, 0, sizeof(regs->fp_q));
194 	return (0);
195 }
196 
197 int
198 set_fpregs(struct thread *td, struct fpreg *regs)
199 {
200 #ifdef VFP
201 	struct pcb *pcb;
202 
203 	pcb = td->td_pcb;
204 	memcpy(pcb->pcb_vfp, regs->fp_q, sizeof(regs->fp_q));
205 	pcb->pcb_fpcr = regs->fp_cr;
206 	pcb->pcb_fpsr = regs->fp_sr;
207 #endif
208 	return (0);
209 }
210 
211 int
212 fill_dbregs(struct thread *td, struct dbreg *regs)
213 {
214 
215 	panic("ARM64TODO: fill_dbregs");
216 }
217 
218 int
219 set_dbregs(struct thread *td, struct dbreg *regs)
220 {
221 
222 	panic("ARM64TODO: set_dbregs");
223 }
224 
225 int
226 ptrace_set_pc(struct thread *td, u_long addr)
227 {
228 
229 	panic("ARM64TODO: ptrace_set_pc");
230 	return (0);
231 }
232 
233 int
234 ptrace_single_step(struct thread *td)
235 {
236 
237 	td->td_frame->tf_spsr |= PSR_SS;
238 	td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
239 	return (0);
240 }
241 
242 int
243 ptrace_clear_single_step(struct thread *td)
244 {
245 
246 	td->td_frame->tf_spsr &= ~PSR_SS;
247 	td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
248 	return (0);
249 }
250 
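/*
 * Reset the register state for a thread that is returning to user space
 * to run a freshly exec'd image.
 */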
251 void
252 exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
253 {
254 	struct trapframe *tf = td->td_frame;
255 
256 	memset(tf, 0, sizeof(struct trapframe));
257 
258 	/*
259 	 * We need to set x0 for init as it doesn't call
260 	 * cpu_set_syscall_retval to copy the value. We also
261 	 * need to set td_retval for the cases where we do.
262 	 */
263 	tf->tf_x[0] = td->td_retval[0] = stack;
264 	tf->tf_sp = STACKALIGN(stack);
265 	tf->tf_lr = imgp->entry_addr;
266 	tf->tf_elr = imgp->entry_addr;
267 }
268 
269 /* Sanity check these are the same size, they will be memcpy'd to and fro */
270 CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
271     sizeof((struct gpregs *)0)->gp_x);
272 CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
273     sizeof((struct reg *)0)->x);
274 
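/*
 * Copy the general-purpose register state from a thread's trap frame into
 * an mcontext_t.  When GET_MC_CLEAR_RET is set, the return value in x0 and
 * the carry flag in spsr are cleared so no stale system call result leaks
 * into the saved context.
 */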
275 int
276 get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
277 {
278 	struct trapframe *tf = td->td_frame;
279 
280 	if (clear_ret & GET_MC_CLEAR_RET) {
281 		mcp->mc_gpregs.gp_x[0] = 0;
282 		mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
283 	} else {
284 		mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
285 		mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
286 	}
287 
288 	memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
289 	    sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));
290 
291 	mcp->mc_gpregs.gp_sp = tf->tf_sp;
292 	mcp->mc_gpregs.gp_lr = tf->tf_lr;
293 	mcp->mc_gpregs.gp_elr = tf->tf_elr;
294 
295 	return (0);
296 }
297 
298 int
299 set_mcontext(struct thread *td, mcontext_t *mcp)
300 {
301 	struct trapframe *tf = td->td_frame;
302 
303 	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));
304 
305 	tf->tf_sp = mcp->mc_gpregs.gp_sp;
306 	tf->tf_lr = mcp->mc_gpregs.gp_lr;
307 	tf->tf_elr = mcp->mc_gpregs.gp_elr;
308 	tf->tf_spsr = mcp->mc_gpregs.gp_spsr;
309 
310 	return (0);
311 }
312 
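/*
 * Save the current thread's floating-point (VFP) state into the mcontext
 * and mark it with _MC_FP_VALID, for use when building a signal frame.
 */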
313 static void
314 get_fpcontext(struct thread *td, mcontext_t *mcp)
315 {
316 #ifdef VFP
317 	struct pcb *curpcb;
318 
319 	critical_enter();
320 
321 	curpcb = curthread->td_pcb;
322 
323 	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
324 		/*
325 		 * If we have just been running VFP instructions we will
326 		 * need to save the state to memcpy it below.
327 		 */
328 		vfp_save_state(td, curpcb);
329 
330 		memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_vfp,
331 		    sizeof(mcp->mc_fpregs.fp_q));
332 		mcp->mc_fpregs.fp_cr = curpcb->pcb_fpcr;
333 		mcp->mc_fpregs.fp_sr = curpcb->pcb_fpsr;
334 		mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
335 		mcp->mc_flags |= _MC_FP_VALID;
336 	}
337 
338 	critical_exit();
339 #endif
340 }
341 
342 static void
343 set_fpcontext(struct thread *td, mcontext_t *mcp)
344 {
345 #ifdef VFP
346 	struct pcb *curpcb;
347 
348 	critical_enter();
349 
350 	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
351 		curpcb = curthread->td_pcb;
352 
353 		/*
354 		 * Discard any VFP state for the current thread; we
355 		 * are about to overwrite it.
356 		 */
357 		vfp_discard(td);
358 
359 		memcpy(curpcb->pcb_vfp, mcp->mc_fpregs.fp_q,
360 		    sizeof(mcp->mc_fpregs.fp_q));
361 		curpcb->pcb_fpcr = mcp->mc_fpregs.fp_cr;
362 		curpcb->pcb_fpsr = mcp->mc_fpregs.fp_sr;
363 		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags;
364 	}
365 
366 	critical_exit();
367 #endif
368 }
369 
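/*
 * Idle the CPU.  With nothing runnable, execute a WFI to wait for an
 * interrupt; the preceding DSB ensures prior memory accesses have
 * completed before the core suspends.
 */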
370 void
371 cpu_idle(int busy)
372 {
373 
374 	spinlock_enter();
375 	if (!busy)
376 		cpu_idleclock();
377 	if (!sched_runnable())
378 		__asm __volatile(
379 		    "dsb sy \n"
380 		    "wfi    \n");
381 	if (!busy)
382 		cpu_activeclock();
383 	spinlock_exit();
384 }
385 
386 void
387 cpu_halt(void)
388 {
389 
390 	/* We should have shut down by now; if not, enter a low-power sleep. */
391 	intr_disable();
392 	while (1) {
393 		__asm __volatile("wfi");
394 	}
395 }
396 
397 /*
398  * Flush the D-cache for non-DMA I/O so that the I-cache can
399  * be made coherent later.
400  */
401 void
402 cpu_flush_dcache(void *ptr, size_t len)
403 {
404 
405 	/* ARM64TODO TBD */
406 }
407 
408 /* Get current clock frequency for the given CPU ID. */
409 int
410 cpu_est_clockrate(int cpu_id, uint64_t *rate)
411 {
412 
413 	panic("ARM64TODO: cpu_est_clockrate");
414 }
415 
416 void
417 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
418 {
419 
420 	pcpu->pc_acpi_id = 0xffffffff;
421 }
422 
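/*
 * MD spinlock entry/exit: the outermost spinlock_enter() on a thread
 * disables interrupts and saves the DAIF flags, and the matching
 * outermost spinlock_exit() restores them.
 */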
423 void
424 spinlock_enter(void)
425 {
426 	struct thread *td;
427 	register_t daif;
428 
429 	td = curthread;
430 	if (td->td_md.md_spinlock_count == 0) {
431 		daif = intr_disable();
432 		td->td_md.md_spinlock_count = 1;
433 		td->td_md.md_saved_daif = daif;
434 	} else
435 		td->td_md.md_spinlock_count++;
436 	critical_enter();
437 }
438 
439 void
440 spinlock_exit(void)
441 {
442 	struct thread *td;
443 	register_t daif;
444 
445 	td = curthread;
446 	critical_exit();
447 	daif = td->td_md.md_saved_daif;
448 	td->td_md.md_spinlock_count--;
449 	if (td->td_md.md_spinlock_count == 0)
450 		intr_restore(daif);
451 }
452 
453 #ifndef	_SYS_SYSPROTO_H_
454 struct sigreturn_args {
455 	const struct __ucontext *sigcntxp;
456 };
457 #endif
458 
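/*
 * sigreturn(2): restore the register state saved in the user-supplied
 * ucontext when a signal handler returns, after validating that the saved
 * spsr keeps the thread in EL0 with exceptions unmasked.
 */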
459 int
460 sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
461 {
462 	ucontext_t uc;
463 	uint32_t spsr;
464 
465 	if (uap == NULL)
466 		return (EFAULT);
467 	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
468 		return (EFAULT);
469 
470 	spsr = uc.uc_mcontext.mc_gpregs.gp_spsr;
471 	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
472 	    (spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
473 		return (EINVAL);
474 
475 	set_mcontext(td, &uc.uc_mcontext);
476 	set_fpcontext(td, &uc.uc_mcontext);
477 
478 	/* Restore signal mask. */
479 	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
480 
481 	return (EJUSTRETURN);
482 }
483 
484 /*
485  * Construct a PCB from a trapframe. This is called from kdb_trap() where
486  * we want to start a backtrace from the function that caused us to enter
487  * the debugger. We have the context in the trapframe, but base the trace
488  * on the PCB. The PCB doesn't have to be perfect, as long as it contains
489  * enough for a backtrace.
490  */
491 void
492 makectx(struct trapframe *tf, struct pcb *pcb)
493 {
494 	int i;
495 
496 	for (i = 0; i < PCB_LR; i++)
497 		pcb->pcb_x[i] = tf->tf_x[i];
498 
499 	pcb->pcb_x[PCB_LR] = tf->tf_lr;
500 	pcb->pcb_pc = tf->tf_elr;
501 	pcb->pcb_sp = tf->tf_sp;
502 }
503 
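/*
 * Deliver a signal to the current thread: build a sigframe (ucontext plus
 * siginfo) on the user stack, or on the alternate signal stack when one is
 * configured for this signal, and redirect the trap frame so the thread
 * resumes in the handler with the signal number and frame pointers in
 * x0-x2.
 */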
504 void
505 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
506 {
507 	struct thread *td;
508 	struct proc *p;
509 	struct trapframe *tf;
510 	struct sigframe *fp, frame;
511 	struct sigacts *psp;
512 	struct sysentvec *sysent;
513 	int code, onstack, sig;
514 
515 	td = curthread;
516 	p = td->td_proc;
517 	PROC_LOCK_ASSERT(p, MA_OWNED);
518 
519 	sig = ksi->ksi_signo;
520 	code = ksi->ksi_code;
521 	psp = p->p_sigacts;
522 	mtx_assert(&psp->ps_mtx, MA_OWNED);
523 
524 	tf = td->td_frame;
525 	onstack = sigonstack(tf->tf_sp);
526 
527 	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
528 	    catcher, sig);
529 
530 	/* Allocate and validate space for the signal handler context. */
531 	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
532 	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
533 		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
534 		    td->td_sigstk.ss_size);
535 #if defined(COMPAT_43)
536 		td->td_sigstk.ss_flags |= SS_ONSTACK;
537 #endif
538 	} else {
539 		fp = (struct sigframe *)td->td_frame->tf_sp;
540 	}
541 
542 	/* Make room, keeping the stack aligned */
543 	fp--;
544 	fp = (struct sigframe *)STACKALIGN(fp);
545 
546 	/* Fill in the frame to copy out */
547 	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
548 	get_fpcontext(td, &frame.sf_uc.uc_mcontext);
549 	frame.sf_si = ksi->ksi_info;
550 	frame.sf_uc.uc_sigmask = *mask;
551 	frame.sf_uc.uc_stack = td->td_sigstk;
552 	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
553 	    ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
554 	mtx_unlock(&psp->ps_mtx);
555 	PROC_UNLOCK(td->td_proc);
556 
557 	/* Copy the sigframe out to the user's stack. */
558 	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
559 		/* Process has trashed its stack. Kill it. */
560 		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
561 		PROC_LOCK(p);
562 		sigexit(td, SIGILL);
563 	}
564 
565 	tf->tf_x[0] = sig;
566 	tf->tf_x[1] = (register_t)&fp->sf_si;
567 	tf->tf_x[2] = (register_t)&fp->sf_uc;
568 
569 	tf->tf_elr = (register_t)catcher;
570 	tf->tf_sp = (register_t)fp;
571 	sysent = p->p_sysent;
572 	if (sysent->sv_sigcode_base != 0)
573 		tf->tf_lr = (register_t)sysent->sv_sigcode_base;
574 	else
575 		tf->tf_lr = (register_t)(sysent->sv_psstrings -
576 		    *(sysent->sv_szsigcode));
577 
578 	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#lx sp=%#lx", td, tf->tf_elr,
579 	    tf->tf_sp);
580 
581 	PROC_LOCK(p);
582 	mtx_lock(&psp->ps_mtx);
583 }
584 
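/*
 * Set up the kernel stack, PCB and trap frame of proc0/thread0 during
 * early boot.
 */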
585 static void
586 init_proc0(vm_offset_t kstack)
587 {
588 	struct pcpu *pcpup = &__pcpu[0];
589 
590 	proc_linkup0(&proc0, &thread0);
591 	thread0.td_kstack = kstack;
592 	thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
593 	thread0.td_pcb->pcb_fpflags = 0;
594 	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
595 	thread0.td_frame = &proc0_tf;
596 	pcpup->pc_curpcb = thread0.td_pcb;
597 }
598 
599 typedef struct {
600 	uint32_t type;
601 	uint64_t phys_start;
602 	uint64_t virt_start;
603 	uint64_t num_pages;
604 	uint64_t attr;
605 } EFI_MEMORY_DESCRIPTOR;
606 
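/*
 * Insert a physical memory range into the physmap array, kept as sorted
 * (start, end) pairs.  Adjacent ranges are merged and overlapping ranges
 * are ignored.  Returns 0 if the table is full, non-zero otherwise.
 */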
607 static int
608 add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
609     u_int *physmap_idxp)
610 {
611 	u_int i, insert_idx, _physmap_idx;
612 
613 	_physmap_idx = *physmap_idxp;
614 
615 	if (length == 0)
616 		return (1);
617 
618 	/*
619 	 * Find insertion point while checking for overlap.  Start off by
620 	 * assuming the new entry will be added to the end.
621 	 */
622 	insert_idx = _physmap_idx;
623 	for (i = 0; i <= _physmap_idx; i += 2) {
624 		if (base < physmap[i + 1]) {
625 			if (base + length <= physmap[i]) {
626 				insert_idx = i;
627 				break;
628 			}
629 			if (boothowto & RB_VERBOSE)
630 				printf(
631 		    "Overlapping memory regions, ignoring second region\n");
632 			return (1);
633 		}
634 	}
635 
636 	/* See if we can prepend to the next entry. */
637 	if (insert_idx <= _physmap_idx &&
638 	    base + length == physmap[insert_idx]) {
639 		physmap[insert_idx] = base;
640 		return (1);
641 	}
642 
643 	/* See if we can append to the previous entry. */
644 	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
645 		physmap[insert_idx - 1] += length;
646 		return (1);
647 	}
648 
649 	_physmap_idx += 2;
650 	*physmap_idxp = _physmap_idx;
651 	if (_physmap_idx == PHYSMAP_SIZE) {
652 		printf(
653 		"Too many segments in the physical address map, giving up\n");
654 		return (0);
655 	}
656 
657 	/*
658 	 * Move the last 'N' entries down to make room for the new
659 	 * entry if needed.
660 	 */
661 	for (i = _physmap_idx; i > insert_idx; i -= 2) {
662 		physmap[i] = physmap[i - 2];
663 		physmap[i + 1] = physmap[i - 1];
664 	}
665 
666 	/* Insert the new entry. */
667 	physmap[insert_idx] = base;
668 	physmap[insert_idx + 1] = base + length;
669 	return (1);
670 }
671 
672 #ifdef FDT
673 static void
674 add_fdt_mem_regions(struct mem_region *mr, int mrcnt, vm_paddr_t *physmap,
675     u_int *physmap_idxp)
676 {
677 
678 	for (int i = 0; i < mrcnt; i++) {
679 		if (!add_physmap_entry(mr[i].mr_start, mr[i].mr_size, physmap,
680 		    physmap_idxp))
681 			break;
682 	}
683 }
684 #endif
685 
686 #define efi_next_descriptor(ptr, size) \
687 	((struct efi_md *)(((uint8_t *)(ptr)) + (size)))
688 
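/*
 * Walk the UEFI memory map supplied by the boot loader, optionally
 * printing it when booting verbose, and add the usable (code, data,
 * boot services and free) ranges to the physmap.
 */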
689 static void
690 add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
691     u_int *physmap_idxp)
692 {
693 	struct efi_md *map, *p;
694 	const char *type;
695 	size_t efisz;
696 	int ndesc, i;
697 
698 	static const char *types[] = {
699 		"Reserved",
700 		"LoaderCode",
701 		"LoaderData",
702 		"BootServicesCode",
703 		"BootServicesData",
704 		"RuntimeServicesCode",
705 		"RuntimeServicesData",
706 		"ConventionalMemory",
707 		"UnusableMemory",
708 		"ACPIReclaimMemory",
709 		"ACPIMemoryNVS",
710 		"MemoryMappedIO",
711 		"MemoryMappedIOPortSpace",
712 		"PalCode"
713 	};
714 
715 	/*
716 	 * Memory map data provided by UEFI via the GetMemoryMap
717 	 * Boot Services API.
718 	 */
719 	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
720 	map = (struct efi_md *)((uint8_t *)efihdr + efisz);
721 
722 	if (efihdr->descriptor_size == 0)
723 		return;
724 	ndesc = efihdr->memory_size / efihdr->descriptor_size;
725 
726 	if (boothowto & RB_VERBOSE)
727 		printf("%23s %12s %12s %8s %4s\n",
728 		    "Type", "Physical", "Virtual", "#Pages", "Attr");
729 
730 	for (i = 0, p = map; i < ndesc; i++,
731 	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
732 		if (boothowto & RB_VERBOSE) {
733 			if (p->md_type <= EFI_MD_TYPE_PALCODE)
734 				type = types[p->md_type];
735 			else
736 				type = "<INVALID>";
737 			printf("%23s %012lx %12p %08lx ", type, p->md_phys,
738 			    p->md_virt, p->md_pages);
739 			if (p->md_attr & EFI_MD_ATTR_UC)
740 				printf("UC ");
741 			if (p->md_attr & EFI_MD_ATTR_WC)
742 				printf("WC ");
743 			if (p->md_attr & EFI_MD_ATTR_WT)
744 				printf("WT ");
745 			if (p->md_attr & EFI_MD_ATTR_WB)
746 				printf("WB ");
747 			if (p->md_attr & EFI_MD_ATTR_UCE)
748 				printf("UCE ");
749 			if (p->md_attr & EFI_MD_ATTR_WP)
750 				printf("WP ");
751 			if (p->md_attr & EFI_MD_ATTR_RP)
752 				printf("RP ");
753 			if (p->md_attr & EFI_MD_ATTR_XP)
754 				printf("XP ");
755 			if (p->md_attr & EFI_MD_ATTR_RT)
756 				printf("RUNTIME");
757 			printf("\n");
758 		}
759 
760 		switch (p->md_type) {
761 		case EFI_MD_TYPE_CODE:
762 		case EFI_MD_TYPE_DATA:
763 		case EFI_MD_TYPE_BS_CODE:
764 		case EFI_MD_TYPE_BS_DATA:
765 		case EFI_MD_TYPE_FREE:
766 			/*
767 			 * We're allowed to use any entry with these types.
768 			 */
769 			break;
770 		default:
771 			continue;
772 		}
773 
774 		if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
775 		    physmap, physmap_idxp))
776 			break;
777 	}
778 }
779 
780 #ifdef FDT
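/*
 * Locate the device tree blob passed in by the loader and hand it to the
 * FDT/Open Firmware code.
 */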
781 static void
782 try_load_dtb(caddr_t kmdp)
783 {
784 	vm_offset_t dtbp;
785 
786 	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
787 	if (dtbp == (vm_offset_t)NULL) {
788 		printf("ERROR loading DTB\n");
789 		return;
790 	}
791 
792 	if (OF_install(OFW_FDT, 0) == FALSE)
793 		panic("Cannot install FDT");
794 
795 	if (OF_init((void *)dtbp) != 0)
796 		panic("OF_init failed with the found device tree");
797 }
798 #endif
799 
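/*
 * Derive the minimum I- and D-cache line sizes from the Cache Type
 * Register (CTR_EL0) for use by the cache maintenance routines.
 */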
800 static void
801 cache_setup(void)
802 {
803 	int dcache_line_shift, icache_line_shift;
804 	uint32_t ctr_el0;
805 
806 	ctr_el0 = READ_SPECIALREG(ctr_el0);
807 
808 	/* Read the log2 of the number of words in each D cache line */
809 	dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
810 	/* Get the D cache line size */
811 	dcache_line_size = sizeof(int) << dcache_line_shift;
812 
813 	/* And the same for the I cache */
814 	icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
815 	icache_line_size = sizeof(int) << icache_line_shift;
816 
817 	idcache_line_size = MIN(dcache_line_size, icache_line_size);
818 }
819 
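/*
 * Early machine-dependent initialization, called from the assembly startup
 * code: parse the loader metadata and memory map, set up the boot CPU's
 * pcpu area, bootstrap the pmap and console, and prepare proc0 so the
 * machine-independent startup can take over.
 */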
820 void
821 initarm(struct arm64_bootparams *abp)
822 {
823 	struct efi_map_header *efihdr;
824 	struct pcpu *pcpup;
825 #ifdef FDT
826 	struct mem_region mem_regions[FDT_MEM_REGIONS];
827 	int mem_regions_sz;
828 #endif
829 	vm_offset_t lastaddr;
830 	caddr_t kmdp;
831 	vm_paddr_t mem_len;
832 	int i;
833 
834 	/* Set the module data location */
835 	preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);
836 
837 	/* Find the kernel address */
838 	kmdp = preload_search_by_type("elf kernel");
839 	if (kmdp == NULL)
840 		kmdp = preload_search_by_type("elf64 kernel");
841 
842 	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
843 	init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);
844 
845 #ifdef FDT
846 	try_load_dtb(kmdp);
847 #endif
848 
849 	/* Find the address to start allocating from */
850 	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
851 
852 	/* Load the physical memory ranges */
853 	physmap_idx = 0;
854 	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
855 	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
856 	if (efihdr != NULL)
857 		add_efi_map_entries(efihdr, physmap, &physmap_idx);
858 #ifdef FDT
859 	else {
860 		/* Grab physical memory region information from the device tree. */
861 		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
862 		    NULL) != 0)
863 			panic("Cannot get physical memory regions");
864 		add_fdt_mem_regions(mem_regions, mem_regions_sz, physmap,
865 		    &physmap_idx);
866 	}
867 #endif
868 
869 	/* Print the memory map */
870 	mem_len = 0;
871 	for (i = 0; i < physmap_idx; i += 2) {
872 		dump_avail[i] = physmap[i];
873 		dump_avail[i + 1] = physmap[i + 1];
874 		mem_len += physmap[i + 1] - physmap[i];
875 	}
876 	dump_avail[i] = 0;
877 	dump_avail[i + 1] = 0;
878 
879 	/* Set the pcpu data, this is needed by pmap_bootstrap */
880 	pcpup = &__pcpu[0];
881 	pcpu_init(pcpup, 0, sizeof(struct pcpu));
882 
883 	/*
884 	 * Set the pcpu pointer with a backup in tpidr_el1 to be
885 	 * loaded when entering the kernel from userland.
886 	 */
887 	__asm __volatile(
888 	    "mov x18, %0 \n"
889 	    "msr tpidr_el1, %0" :: "r"(pcpup));
890 
891 	PCPU_SET(curthread, &thread0);
892 
893 	/* Do basic tuning, hz etc */
894 	init_param1();
895 
896 	cache_setup();
897 
898 	/* Bootstrap enough of pmap to enter the kernel proper */
899 	pmap_bootstrap(abp->kern_l1pt, KERNBASE - abp->kern_delta,
900 	    lastaddr - KERNBASE);
901 
902 	arm_devmap_bootstrap(0, NULL);
903 
904 	cninit();
905 
906 	init_proc0(abp->kern_stack);
907 	msgbufinit(msgbufp, msgbufsize);
908 	mutex_init();
909 	init_param2(physmem);
910 
911 	dbg_monitor_init();
912 	kdb_init();
913 
914 	early_boot = 0;
915 }
916 
917 uint32_t (*arm_cpu_fill_vdso_timehands)(struct vdso_timehands *,
918     struct timecounter *);
919 
920 uint32_t
921 cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
922 {
923 
924 	return (arm_cpu_fill_vdso_timehands != NULL ?
925 	    arm_cpu_fill_vdso_timehands(vdso_th, tc) : 0);
926 }
927 
928 #ifdef DDB
929 #include <ddb/ddb.h>
930 
931 DB_SHOW_COMMAND(specialregs, db_show_spregs)
932 {
933 #define	PRINT_REG(reg)	\
934     db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))
935 
936 	PRINT_REG(actlr_el1);
937 	PRINT_REG(afsr0_el1);
938 	PRINT_REG(afsr1_el1);
939 	PRINT_REG(aidr_el1);
940 	PRINT_REG(amair_el1);
941 	PRINT_REG(ccsidr_el1);
942 	PRINT_REG(clidr_el1);
943 	PRINT_REG(contextidr_el1);
944 	PRINT_REG(cpacr_el1);
945 	PRINT_REG(csselr_el1);
946 	PRINT_REG(ctr_el0);
947 	PRINT_REG(currentel);
948 	PRINT_REG(daif);
949 	PRINT_REG(dczid_el0);
950 	PRINT_REG(elr_el1);
951 	PRINT_REG(esr_el1);
952 	PRINT_REG(far_el1);
953 #if 0
954 	/* ARM64TODO: Enable VFP before reading floating-point registers */
955 	PRINT_REG(fpcr);
956 	PRINT_REG(fpsr);
957 #endif
958 	PRINT_REG(id_aa64afr0_el1);
959 	PRINT_REG(id_aa64afr1_el1);
960 	PRINT_REG(id_aa64dfr0_el1);
961 	PRINT_REG(id_aa64dfr1_el1);
962 	PRINT_REG(id_aa64isar0_el1);
963 	PRINT_REG(id_aa64isar1_el1);
964 	PRINT_REG(id_aa64pfr0_el1);
965 	PRINT_REG(id_aa64pfr1_el1);
966 	PRINT_REG(id_afr0_el1);
967 	PRINT_REG(id_dfr0_el1);
968 	PRINT_REG(id_isar0_el1);
969 	PRINT_REG(id_isar1_el1);
970 	PRINT_REG(id_isar2_el1);
971 	PRINT_REG(id_isar3_el1);
972 	PRINT_REG(id_isar4_el1);
973 	PRINT_REG(id_isar5_el1);
974 	PRINT_REG(id_mmfr0_el1);
975 	PRINT_REG(id_mmfr1_el1);
976 	PRINT_REG(id_mmfr2_el1);
977 	PRINT_REG(id_mmfr3_el1);
978 #if 0
979 	/* Missing from llvm */
980 	PRINT_REG(id_mmfr4_el1);
981 #endif
982 	PRINT_REG(id_pfr0_el1);
983 	PRINT_REG(id_pfr1_el1);
984 	PRINT_REG(isr_el1);
985 	PRINT_REG(mair_el1);
986 	PRINT_REG(midr_el1);
987 	PRINT_REG(mpidr_el1);
988 	PRINT_REG(mvfr0_el1);
989 	PRINT_REG(mvfr1_el1);
990 	PRINT_REG(mvfr2_el1);
991 	PRINT_REG(revidr_el1);
992 	PRINT_REG(sctlr_el1);
993 	PRINT_REG(sp_el0);
994 	PRINT_REG(spsel);
995 	PRINT_REG(spsr_el1);
996 	PRINT_REG(tcr_el1);
997 	PRINT_REG(tpidr_el0);
998 	PRINT_REG(tpidr_el1);
999 	PRINT_REG(tpidrro_el0);
1000 	PRINT_REG(ttbr0_el1);
1001 	PRINT_REG(ttbr1_el1);
1002 	PRINT_REG(vbar_el1);
1003 #undef PRINT_REG
1004 }
1005 
1006 DB_SHOW_COMMAND(vtop, db_show_vtop)
1007 {
1008 	uint64_t phys;
1009 
1010 	if (have_addr) {
1011 		phys = arm64_address_translate_s1e1r(addr);
1012 		db_printf("Physical address reg: 0x%016lx\n", phys);
1013 	} else
1014 		db_printf("show vtop <virt_addr>\n");
1015 }
1016 #endif
1017