xref: /freebsd/sys/arm64/arm64/machdep.c (revision 0957b409)
1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include "opt_acpi.h"
29 #include "opt_platform.h"
30 #include "opt_ddb.h"
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/buf.h>
38 #include <sys/bus.h>
39 #include <sys/cons.h>
40 #include <sys/cpu.h>
41 #include <sys/devmap.h>
42 #include <sys/efi.h>
43 #include <sys/exec.h>
44 #include <sys/imgact.h>
45 #include <sys/kdb.h>
46 #include <sys/kernel.h>
47 #include <sys/limits.h>
48 #include <sys/linker.h>
49 #include <sys/msgbuf.h>
50 #include <sys/pcpu.h>
51 #include <sys/proc.h>
52 #include <sys/ptrace.h>
53 #include <sys/reboot.h>
54 #include <sys/rwlock.h>
55 #include <sys/sched.h>
56 #include <sys/signalvar.h>
57 #include <sys/syscallsubr.h>
58 #include <sys/sysent.h>
59 #include <sys/sysproto.h>
60 #include <sys/ucontext.h>
61 #include <sys/vdso.h>
62 
63 #include <vm/vm.h>
64 #include <vm/vm_kern.h>
65 #include <vm/vm_object.h>
66 #include <vm/vm_page.h>
67 #include <vm/pmap.h>
68 #include <vm/vm_map.h>
69 #include <vm/vm_pager.h>
70 
71 #include <machine/armreg.h>
72 #include <machine/cpu.h>
73 #include <machine/debug_monitor.h>
74 #include <machine/kdb.h>
75 #include <machine/machdep.h>
76 #include <machine/metadata.h>
77 #include <machine/md_var.h>
78 #include <machine/pcb.h>
79 #include <machine/reg.h>
80 #include <machine/undefined.h>
81 #include <machine/vmparam.h>
82 
83 #include <arm/include/physmem.h>
84 
85 #ifdef VFP
86 #include <machine/vfp.h>
87 #endif
88 
89 #ifdef DEV_ACPI
90 #include <contrib/dev/acpica/include/acpi.h>
91 #include <machine/acpica_machdep.h>
92 #endif
93 
94 #ifdef FDT
95 #include <dev/fdt/fdt_common.h>
96 #include <dev/ofw/openfirm.h>
97 #endif
98 
99 
100 enum arm64_bus arm64_bus_method = ARM64_BUS_NONE;
101 
102 struct pcpu __pcpu[MAXCPU];
103 
104 static struct trapframe proc0_tf;
105 
106 int early_boot = 1;
107 int cold = 1;
108 
109 struct kva_md_info kmi;
110 
111 int64_t dcache_line_size;	/* The minimum D cache line size */
112 int64_t icache_line_size;	/* The minimum I cache line size */
113 int64_t idcache_line_size;	/* The minimum cache line size */
114 int64_t dczva_line_size;	/* The size of cache line the dc zva zeroes */
115 int has_pan;
116 
117 /*
118  * Physical address of the EFI System Table. Stashed from the metadata hints
119  * passed into the kernel and used by the EFI code to call runtime services.
120  */
121 vm_paddr_t efi_systbl_phys;
122 
123 /* pagezero_* implementations are provided in support.S */
124 void pagezero_simple(void *);
125 void pagezero_cache(void *);
126 
127 /* pagezero_simple is default pagezero */
128 void (*pagezero)(void *p) = pagezero_simple;
129 
130 static void
131 pan_setup(void)
132 {
133 	uint64_t id_aa64mmfr1;
134 
135 	id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
136 	if (ID_AA64MMFR1_PAN(id_aa64mmfr1) != ID_AA64MMFR1_PAN_NONE)
137 		has_pan = 1;
138 }
139 
140 void
141 pan_enable(void)
142 {
143 
144 	/*
145 	 * The LLVM integrated assembler doesn't understand the PAN
146 	 * PSTATE field. Because of this we need to manually create
147 	 * the instruction in an asm block. This is equivalent to:
148 	 * msr pan, #1
149 	 *
150 	 * This sets the PAN bit, stopping the kernel from accessing
151 	 * memory that userspace can also access, unless the kernel
152 	 * uses the dedicated userspace load/store instructions.
153 	 */
154 	if (has_pan) {
155 		WRITE_SPECIALREG(sctlr_el1,
156 		    READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
157 		__asm __volatile(".inst 0xd500409f | (0x1 << 8)");
158 	}
159 }
160 
161 static void
162 cpu_startup(void *dummy)
163 {
164 
165 	undef_init();
166 	identify_cpu();
167 	install_cpu_errata();
168 
169 	vm_ksubmap_init(&kmi);
170 	bufinit();
171 	vm_pager_bufferinit();
172 }
173 
174 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
175 
176 int
177 cpu_idle_wakeup(int cpu)
178 {
179 
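	/* No MD wakeup mechanism here; returning 0 lets the scheduler fall back to an IPI. */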
180 	return (0);
181 }
182 
183 int
184 fill_regs(struct thread *td, struct reg *regs)
185 {
186 	struct trapframe *frame;
187 
188 	frame = td->td_frame;
189 	regs->sp = frame->tf_sp;
190 	regs->lr = frame->tf_lr;
191 	regs->elr = frame->tf_elr;
192 	regs->spsr = frame->tf_spsr;
193 
194 	memcpy(regs->x, frame->tf_x, sizeof(regs->x));
195 
196 	return (0);
197 }
198 
199 int
200 set_regs(struct thread *td, struct reg *regs)
201 {
202 	struct trapframe *frame;
203 
204 	frame = td->td_frame;
205 	frame->tf_sp = regs->sp;
206 	frame->tf_lr = regs->lr;
207 	frame->tf_elr = regs->elr;
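	/* Only the PSR_FLAGS (condition flag) bits of spsr may be changed by the caller. */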
208 	frame->tf_spsr &= ~PSR_FLAGS;
209 	frame->tf_spsr |= regs->spsr & PSR_FLAGS;
210 
211 	memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));
212 
213 	return (0);
214 }
215 
216 int
217 fill_fpregs(struct thread *td, struct fpreg *regs)
218 {
219 #ifdef VFP
220 	struct pcb *pcb;
221 
222 	pcb = td->td_pcb;
223 	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
224 		/*
225 		 * If we have just been running VFP instructions we will
226 		 * need to save the state to memcpy it below.
227 		 */
228 		if (td == curthread)
229 			vfp_save_state(td, pcb);
230 
231 		KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
232 		    ("Called fill_fpregs while the kernel is using the VFP"));
233 		memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs,
234 		    sizeof(regs->fp_q));
235 		regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
236 		regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
237 	} else
238 #endif
239 		memset(regs, 0, sizeof(*regs));
240 	return (0);
241 }
242 
243 int
244 set_fpregs(struct thread *td, struct fpreg *regs)
245 {
246 #ifdef VFP
247 	struct pcb *pcb;
248 
249 	pcb = td->td_pcb;
250 	KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
251 	    ("Called set_fpregs while the kernel is using the VFP"));
252 	memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q));
253 	pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr;
254 	pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr;
255 #endif
256 	return (0);
257 }
258 
259 int
260 fill_dbregs(struct thread *td, struct dbreg *regs)
261 {
262 
263 	printf("ARM64TODO: fill_dbregs");
264 	return (EDOOFUS);
265 }
266 
267 int
268 set_dbregs(struct thread *td, struct dbreg *regs)
269 {
270 
271 	printf("ARM64TODO: set_dbregs");
272 	return (EDOOFUS);
273 }
274 
275 #ifdef COMPAT_FREEBSD32
276 int
277 fill_regs32(struct thread *td, struct reg32 *regs)
278 {
279 	int i;
280 	struct trapframe *tf;
281 
282 	tf = td->td_frame;
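	/* The 13 AArch32 general registers r0-r12 are held in x0-x12 of the trapframe. */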
283 	for (i = 0; i < 13; i++)
284 		regs->r[i] = tf->tf_x[i];
285 	regs->r_sp = tf->tf_sp;
286 	regs->r_lr = tf->tf_lr;
287 	regs->r_pc = tf->tf_elr;
288 	regs->r_cpsr = tf->tf_spsr;
289 
290 	return (0);
291 }
292 
293 int
294 set_regs32(struct thread *td, struct reg32 *regs)
295 {
296 	int i;
297 	struct trapframe *tf;
298 
299 	tf = td->td_frame;
300 	for (i = 0; i < 13; i++)
301 		tf->tf_x[i] = regs->r[i];
302 	tf->tf_sp = regs->r_sp;
303 	tf->tf_lr = regs->r_lr;
304 	tf->tf_elr = regs->r_pc;
305 	tf->tf_spsr = regs->r_cpsr;
306 
307 
308 	return (0);
309 }
310 
311 int
312 fill_fpregs32(struct thread *td, struct fpreg32 *regs)
313 {
314 
315 	printf("ARM64TODO: fill_fpregs32");
316 	return (EDOOFUS);
317 }
318 
319 int
320 set_fpregs32(struct thread *td, struct fpreg32 *regs)
321 {
322 
323 	printf("ARM64TODO: set_fpregs32");
324 	return (EDOOFUS);
325 }
326 
327 int
328 fill_dbregs32(struct thread *td, struct dbreg32 *regs)
329 {
330 
331 	printf("ARM64TODO: fill_dbregs32");
332 	return (EDOOFUS);
333 }
334 
335 int
336 set_dbregs32(struct thread *td, struct dbreg32 *regs)
337 {
338 
339 	printf("ARM64TODO: set_dbregs32");
340 	return (EDOOFUS);
341 }
342 #endif
343 
344 int
345 ptrace_set_pc(struct thread *td, u_long addr)
346 {
347 
348 	printf("ARM64TODO: ptrace_set_pc");
349 	return (EDOOFUS);
350 }
351 
352 int
353 ptrace_single_step(struct thread *td)
354 {
355 
356 	td->td_frame->tf_spsr |= PSR_SS;
357 	td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
358 	return (0);
359 }
360 
361 int
362 ptrace_clear_single_step(struct thread *td)
363 {
364 
365 	td->td_frame->tf_spsr &= ~PSR_SS;
366 	td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
367 	return (0);
368 }
369 
370 void
371 exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
372 {
373 	struct trapframe *tf = td->td_frame;
374 
375 	memset(tf, 0, sizeof(struct trapframe));
376 
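	/* x0 carries the original stack address; sp is set to the aligned value. */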
377 	tf->tf_x[0] = stack;
378 	tf->tf_sp = STACKALIGN(stack);
379 	tf->tf_lr = imgp->entry_addr;
380 	tf->tf_elr = imgp->entry_addr;
381 }
382 
383 /* Sanity check these are the same size, they will be memcpy'd to and fro */
384 CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
385     sizeof((struct gpregs *)0)->gp_x);
386 CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
387     sizeof((struct reg *)0)->x);
388 
389 int
390 get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
391 {
392 	struct trapframe *tf = td->td_frame;
393 
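	/* With GET_MC_CLEAR_RET, zero the syscall return register and the carry flag used to report errors. */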
394 	if (clear_ret & GET_MC_CLEAR_RET) {
395 		mcp->mc_gpregs.gp_x[0] = 0;
396 		mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
397 	} else {
398 		mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
399 		mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
400 	}
401 
402 	memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
403 	    sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));
404 
405 	mcp->mc_gpregs.gp_sp = tf->tf_sp;
406 	mcp->mc_gpregs.gp_lr = tf->tf_lr;
407 	mcp->mc_gpregs.gp_elr = tf->tf_elr;
408 
409 	return (0);
410 }
411 
412 int
413 set_mcontext(struct thread *td, mcontext_t *mcp)
414 {
415 	struct trapframe *tf = td->td_frame;
416 	uint32_t spsr;
417 
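	/* Require an AArch64 EL0 target state with no DAIF mask bits set. */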
418 	spsr = mcp->mc_gpregs.gp_spsr;
419 	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
420 	    (spsr & (PSR_AARCH32 | PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
421 		return (EINVAL);
422 
423 	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));
424 
425 	tf->tf_sp = mcp->mc_gpregs.gp_sp;
426 	tf->tf_lr = mcp->mc_gpregs.gp_lr;
427 	tf->tf_elr = mcp->mc_gpregs.gp_elr;
428 	tf->tf_spsr = mcp->mc_gpregs.gp_spsr;
429 
430 	return (0);
431 }
432 
433 static void
434 get_fpcontext(struct thread *td, mcontext_t *mcp)
435 {
436 #ifdef VFP
437 	struct pcb *curpcb;
438 
439 	critical_enter();
440 
441 	curpcb = curthread->td_pcb;
442 
443 	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
444 		/*
445 		 * If we have just been running VFP instructions we will
446 		 * need to save the state to memcpy it below.
447 		 */
448 		vfp_save_state(td, curpcb);
449 
450 		KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
451 		    ("Called get_fpcontext while the kernel is using the VFP"));
452 		KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
453 		    ("Non-userspace FPU flags set in get_fpcontext"));
454 		memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs,
455 		    sizeof(mcp->mc_fpregs));
456 		mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr;
457 		mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr;
458 		mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
459 		mcp->mc_flags |= _MC_FP_VALID;
460 	}
461 
462 	critical_exit();
463 #endif
464 }
465 
466 static void
467 set_fpcontext(struct thread *td, mcontext_t *mcp)
468 {
469 #ifdef VFP
470 	struct pcb *curpcb;
471 
472 	critical_enter();
473 
474 	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
475 		curpcb = curthread->td_pcb;
476 
477 		/*
478 		 * Discard any vfp state for the current thread, we
479 		 * are about to override it.
480 		 */
481 		vfp_discard(td);
482 
483 		KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
484 		    ("Called set_fpcontext while the kernel is using the VFP"));
485 		memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q,
486 		    sizeof(mcp->mc_fpregs));
487 		curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
488 		curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
489 		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
490 	}
491 
492 	critical_exit();
493 #endif
494 }
495 
496 void
497 cpu_idle(int busy)
498 {
499 
500 	spinlock_enter();
501 	if (!busy)
502 		cpu_idleclock();
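	/* Sleep until an interrupt; the dsb ensures prior memory accesses complete first. */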
503 	if (!sched_runnable())
504 		__asm __volatile(
505 		    "dsb sy \n"
506 		    "wfi    \n");
507 	if (!busy)
508 		cpu_activeclock();
509 	spinlock_exit();
510 }
511 
512 void
513 cpu_halt(void)
514 {
515 
516 	/* We should have shut down by now; if not, enter a low-power sleep */
517 	intr_disable();
518 	while (1) {
519 		__asm __volatile("wfi");
520 	}
521 }
522 
523 /*
524  * Flush the D-cache for non-DMA I/O so that the I-cache can
525  * be made coherent later.
526  */
527 void
528 cpu_flush_dcache(void *ptr, size_t len)
529 {
530 
531 	/* ARM64TODO TBD */
532 }
533 
534 /* Get current clock frequency for the given CPU ID. */
535 int
536 cpu_est_clockrate(int cpu_id, uint64_t *rate)
537 {
538 	struct pcpu *pc;
539 
540 	pc = pcpu_find(cpu_id);
541 	if (pc == NULL || rate == NULL)
542 		return (EINVAL);
543 
544 	if (pc->pc_clock == 0)
545 		return (EOPNOTSUPP);
546 
547 	*rate = pc->pc_clock;
548 	return (0);
549 }
550 
551 void
552 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
553 {
554 
555 	pcpu->pc_acpi_id = 0xffffffff;
556 }
557 
558 void
559 spinlock_enter(void)
560 {
561 	struct thread *td;
562 	register_t daif;
563 
564 	td = curthread;
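	/* On first entry save the DAIF state and mask interrupts; nested calls just bump the count. */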
565 	if (td->td_md.md_spinlock_count == 0) {
566 		daif = intr_disable();
567 		td->td_md.md_spinlock_count = 1;
568 		td->td_md.md_saved_daif = daif;
569 	} else
570 		td->td_md.md_spinlock_count++;
571 	critical_enter();
572 }
573 
574 void
575 spinlock_exit(void)
576 {
577 	struct thread *td;
578 	register_t daif;
579 
580 	td = curthread;
581 	critical_exit();
582 	daif = td->td_md.md_saved_daif;
583 	td->td_md.md_spinlock_count--;
584 	if (td->td_md.md_spinlock_count == 0)
585 		intr_restore(daif);
586 }
587 
588 #ifndef	_SYS_SYSPROTO_H_
589 struct sigreturn_args {
590 	ucontext_t *sigcntxp;
591 };
592 #endif
593 
594 int
595 sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
596 {
597 	ucontext_t uc;
598 	int error;
599 
600 	if (uap == NULL)
601 		return (EFAULT);
602 	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
603 		return (EFAULT);
604 
605 	error = set_mcontext(td, &uc.uc_mcontext);
606 	if (error != 0)
607 		return (error);
608 	set_fpcontext(td, &uc.uc_mcontext);
609 
610 	/* Restore signal mask. */
611 	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
612 
613 	return (EJUSTRETURN);
614 }
615 
616 /*
617  * Construct a PCB from a trapframe. This is called from kdb_trap() where
618  * we want to start a backtrace from the function that caused us to enter
619  * the debugger. We have the context in the trapframe, but base the trace
620  * on the PCB. The PCB doesn't have to be perfect, as long as it contains
621  * enough for a backtrace.
622  */
623 void
624 makectx(struct trapframe *tf, struct pcb *pcb)
625 {
626 	int i;
627 
628 	for (i = 0; i < PCB_LR; i++)
629 		pcb->pcb_x[i] = tf->tf_x[i];
630 
631 	pcb->pcb_x[PCB_LR] = tf->tf_lr;
632 	pcb->pcb_pc = tf->tf_elr;
633 	pcb->pcb_sp = tf->tf_sp;
634 }
635 
636 void
637 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
638 {
639 	struct thread *td;
640 	struct proc *p;
641 	struct trapframe *tf;
642 	struct sigframe *fp, frame;
643 	struct sigacts *psp;
644 	struct sysentvec *sysent;
645 	int onstack, sig;
646 
647 	td = curthread;
648 	p = td->td_proc;
649 	PROC_LOCK_ASSERT(p, MA_OWNED);
650 
651 	sig = ksi->ksi_signo;
652 	psp = p->p_sigacts;
653 	mtx_assert(&psp->ps_mtx, MA_OWNED);
654 
655 	tf = td->td_frame;
656 	onstack = sigonstack(tf->tf_sp);
657 
658 	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
659 	    catcher, sig);
660 
661 	/* Allocate and validate space for the signal handler context. */
662 	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
663 	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
664 		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
665 		    td->td_sigstk.ss_size);
666 #if defined(COMPAT_43)
667 		td->td_sigstk.ss_flags |= SS_ONSTACK;
668 #endif
669 	} else {
670 		fp = (struct sigframe *)td->td_frame->tf_sp;
671 	}
672 
673 	/* Make room, keeping the stack aligned */
674 	fp--;
675 	fp = (struct sigframe *)STACKALIGN(fp);
676 
677 	/* Fill in the frame to copy out */
678 	bzero(&frame, sizeof(frame));
679 	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
680 	get_fpcontext(td, &frame.sf_uc.uc_mcontext);
681 	frame.sf_si = ksi->ksi_info;
682 	frame.sf_uc.uc_sigmask = *mask;
683 	frame.sf_uc.uc_stack = td->td_sigstk;
684 	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ?
685 	    (onstack ? SS_ONSTACK : 0) : SS_DISABLE;
686 	mtx_unlock(&psp->ps_mtx);
687 	PROC_UNLOCK(td->td_proc);
688 
689 	/* Copy the sigframe out to the user's stack. */
690 	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
691 		/* Process has trashed its stack. Kill it. */
692 		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
693 		PROC_LOCK(p);
694 		sigexit(td, SIGILL);
695 	}
696 
697 	tf->tf_x[0] = sig;
698 	tf->tf_x[1] = (register_t)&fp->sf_si;
699 	tf->tf_x[2] = (register_t)&fp->sf_uc;
700 
701 	tf->tf_elr = (register_t)catcher;
702 	tf->tf_sp = (register_t)fp;
703 	sysent = p->p_sysent;
704 	if (sysent->sv_sigcode_base != 0)
705 		tf->tf_lr = (register_t)sysent->sv_sigcode_base;
706 	else
707 		tf->tf_lr = (register_t)(sysent->sv_psstrings -
708 		    *(sysent->sv_szsigcode));
709 
710 	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
711 	    tf->tf_sp);
712 
713 	PROC_LOCK(p);
714 	mtx_lock(&psp->ps_mtx);
715 }
716 
717 static void
718 init_proc0(vm_offset_t kstack)
719 {
720 	struct pcpu *pcpup = &__pcpu[0];
721 
722 	proc_linkup0(&proc0, &thread0);
723 	thread0.td_kstack = kstack;
724 	thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
725 	thread0.td_pcb->pcb_fpflags = 0;
726 	thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
727 	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
728 	thread0.td_frame = &proc0_tf;
729 	pcpup->pc_curpcb = thread0.td_pcb;
730 
731 	/* Set the base address of translation table 0. */
732 	thread0.td_proc->p_md.md_l0addr = READ_SPECIALREG(ttbr0_el1);
733 }
734 
735 typedef struct {
736 	uint32_t type;
737 	uint64_t phys_start;
738 	uint64_t virt_start;
739 	uint64_t num_pages;
740 	uint64_t attr;
741 } EFI_MEMORY_DESCRIPTOR;
742 
743 typedef void (*efi_map_entry_cb)(struct efi_md *);
744 
745 static void
746 foreach_efi_map_entry(struct efi_map_header *efihdr, efi_map_entry_cb cb)
747 {
748 	struct efi_md *map, *p;
749 	size_t efisz;
750 	int ndesc, i;
751 
752 	/*
753 	 * Memory map data provided by UEFI via the GetMemoryMap
754 	 * Boot Services API.
755 	 */
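	/* The descriptor array follows the header, which is padded to a 16-byte boundary. */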
756 	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
757 	map = (struct efi_md *)((uint8_t *)efihdr + efisz);
758 
759 	if (efihdr->descriptor_size == 0)
760 		return;
761 	ndesc = efihdr->memory_size / efihdr->descriptor_size;
762 
763 	for (i = 0, p = map; i < ndesc; i++,
764 	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
765 		cb(p);
766 	}
767 }
768 
769 static void
770 exclude_efi_map_entry(struct efi_md *p)
771 {
772 
773 	switch (p->md_type) {
774 	case EFI_MD_TYPE_CODE:
775 	case EFI_MD_TYPE_DATA:
776 	case EFI_MD_TYPE_BS_CODE:
777 	case EFI_MD_TYPE_BS_DATA:
778 	case EFI_MD_TYPE_FREE:
779 		/*
780 		 * We're allowed to use any entry with these types.
781 		 */
782 		break;
783 	default:
784 		arm_physmem_exclude_region(p->md_phys, p->md_pages * PAGE_SIZE,
785 		    EXFLAG_NOALLOC);
786 	}
787 }
788 
789 static void
790 exclude_efi_map_entries(struct efi_map_header *efihdr)
791 {
792 
793 	foreach_efi_map_entry(efihdr, exclude_efi_map_entry);
794 }
795 
796 static void
797 add_efi_map_entry(struct efi_md *p)
798 {
799 
800 	switch (p->md_type) {
801 	case EFI_MD_TYPE_RT_DATA:
802 		/*
803 		 * Runtime data will be excluded after the DMAP
804 		 * region is created to stop it from being added
805 		 * to phys_avail.
806 		 */
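		/* FALLTHROUGH */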
807 	case EFI_MD_TYPE_CODE:
808 	case EFI_MD_TYPE_DATA:
809 	case EFI_MD_TYPE_BS_CODE:
810 	case EFI_MD_TYPE_BS_DATA:
811 	case EFI_MD_TYPE_FREE:
812 		/*
813 		 * We're allowed to use any entry with these types.
814 		 */
815 		arm_physmem_hardware_region(p->md_phys,
816 		    p->md_pages * PAGE_SIZE);
817 		break;
818 	}
819 }
820 
821 static void
822 add_efi_map_entries(struct efi_map_header *efihdr)
823 {
824 
825 	foreach_efi_map_entry(efihdr, add_efi_map_entry);
826 }
827 
828 static void
829 print_efi_map_entry(struct efi_md *p)
830 {
831 	const char *type;
832 	static const char *types[] = {
833 		"Reserved",
834 		"LoaderCode",
835 		"LoaderData",
836 		"BootServicesCode",
837 		"BootServicesData",
838 		"RuntimeServicesCode",
839 		"RuntimeServicesData",
840 		"ConventionalMemory",
841 		"UnusableMemory",
842 		"ACPIReclaimMemory",
843 		"ACPIMemoryNVS",
844 		"MemoryMappedIO",
845 		"MemoryMappedIOPortSpace",
846 		"PalCode",
847 		"PersistentMemory"
848 	};
849 
850 	if (p->md_type < nitems(types))
851 		type = types[p->md_type];
852 	else
853 		type = "<INVALID>";
854 	printf("%23s %012lx %12p %08lx ", type, p->md_phys,
855 	    p->md_virt, p->md_pages);
856 	if (p->md_attr & EFI_MD_ATTR_UC)
857 		printf("UC ");
858 	if (p->md_attr & EFI_MD_ATTR_WC)
859 		printf("WC ");
860 	if (p->md_attr & EFI_MD_ATTR_WT)
861 		printf("WT ");
862 	if (p->md_attr & EFI_MD_ATTR_WB)
863 		printf("WB ");
864 	if (p->md_attr & EFI_MD_ATTR_UCE)
865 		printf("UCE ");
866 	if (p->md_attr & EFI_MD_ATTR_WP)
867 		printf("WP ");
868 	if (p->md_attr & EFI_MD_ATTR_RP)
869 		printf("RP ");
870 	if (p->md_attr & EFI_MD_ATTR_XP)
871 		printf("XP ");
872 	if (p->md_attr & EFI_MD_ATTR_NV)
873 		printf("NV ");
874 	if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
875 		printf("MORE_RELIABLE ");
876 	if (p->md_attr & EFI_MD_ATTR_RO)
877 		printf("RO ");
878 	if (p->md_attr & EFI_MD_ATTR_RT)
879 		printf("RUNTIME");
880 	printf("\n");
881 }
882 
883 static void
884 print_efi_map_entries(struct efi_map_header *efihdr)
885 {
886 
887 	printf("%23s %12s %12s %8s %4s\n",
888 	    "Type", "Physical", "Virtual", "#Pages", "Attr");
889 	foreach_efi_map_entry(efihdr, print_efi_map_entry);
890 }
891 
892 #ifdef FDT
893 static void
894 try_load_dtb(caddr_t kmdp)
895 {
896 	vm_offset_t dtbp;
897 
898 	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
899 	if (dtbp == (vm_offset_t)NULL) {
900 		printf("ERROR loading DTB\n");
901 		return;
902 	}
903 
904 	if (OF_install(OFW_FDT, 0) == FALSE)
905 		panic("Cannot install FDT");
906 
907 	if (OF_init((void *)dtbp) != 0)
908 		panic("OF_init failed with the found device tree");
909 }
910 #endif
911 
912 static bool
913 bus_probe(void)
914 {
915 	bool has_acpi, has_fdt;
916 	char *order, *env;
917 
918 	has_acpi = has_fdt = false;
919 
920 #ifdef FDT
921 	has_fdt = (OF_peer(0) != 0);
922 #endif
923 #ifdef DEV_ACPI
924 	has_acpi = (acpi_find_table(ACPI_SIG_SPCR) != 0);
925 #endif
926 
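	/* kern.cfg.order is a comma-separated preference list of "acpi" and "fdt". */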
927 	env = kern_getenv("kern.cfg.order");
928 	if (env != NULL) {
929 		order = env;
930 		while (order != NULL) {
931 			if (has_acpi &&
932 			    strncmp(order, "acpi", 4) == 0 &&
933 			    (order[4] == ',' || order[4] == '\0')) {
934 				arm64_bus_method = ARM64_BUS_ACPI;
935 				break;
936 			}
937 			if (has_fdt &&
938 			    strncmp(order, "fdt", 3) == 0 &&
939 			    (order[3] == ',' || order[3] == '\0')) {
940 				arm64_bus_method = ARM64_BUS_FDT;
941 				break;
942 			}
943 			order = strchr(order, ',');
			if (order != NULL)
				order++;	/* Skip over the comma. */
944 		}
945 		freeenv(env);
946 
947 		/* If we set the bus method it is valid */
948 		if (arm64_bus_method != ARM64_BUS_NONE)
949 			return (true);
950 	}
951 	/* If no order or an invalid order was set use the default */
952 	if (arm64_bus_method == ARM64_BUS_NONE) {
953 		if (has_fdt)
954 			arm64_bus_method = ARM64_BUS_FDT;
955 		else if (has_acpi)
956 			arm64_bus_method = ARM64_BUS_ACPI;
957 	}
958 
959 	/*
960 	 * If no order was set, the default chosen above is valid. Otherwise
961 	 * we still pick a method so cninit() can work, and the caller will
962 	 * panic to tell the user about the invalid bus setup.
963 	 */
964 	return (env == NULL);
965 }
966 
967 static void
968 cache_setup(void)
969 {
970 	int dcache_line_shift, icache_line_shift, dczva_line_shift;
971 	uint32_t ctr_el0;
972 	uint32_t dczid_el0;
973 
974 	ctr_el0 = READ_SPECIALREG(ctr_el0);
975 
976 	/* Read the log2 words in each D cache line */
977 	dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
978 	/* Get the D cache line size */
979 	dcache_line_size = sizeof(int) << dcache_line_shift;
980 
981 	/* And the same for the I cache */
982 	icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
983 	icache_line_size = sizeof(int) << icache_line_shift;
984 
985 	idcache_line_size = MIN(dcache_line_size, icache_line_size);
986 
987 	dczid_el0 = READ_SPECIALREG(dczid_el0);
988 
989 	/* Check if dc zva is not prohibited */
990 	if (dczid_el0 & DCZID_DZP)
991 		dczva_line_size = 0;
992 	else {
993 		/* Same as with above calculations */
994 		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
995 		dczva_line_size = sizeof(int) << dczva_line_shift;
996 
997 		/* Change pagezero function */
998 		pagezero = pagezero_cache;
999 	}
1000 }
1001 
1002 void
1003 initarm(struct arm64_bootparams *abp)
1004 {
1005 	struct efi_fb *efifb;
1006 	struct efi_map_header *efihdr;
1007 	struct pcpu *pcpup;
1008 	char *env;
1009 #ifdef FDT
1010 	struct mem_region mem_regions[FDT_MEM_REGIONS];
1011 	int mem_regions_sz;
1012 #endif
1013 	vm_offset_t lastaddr;
1014 	caddr_t kmdp;
1015 	bool valid;
1016 
1017 	/* Set the module data location */
1018 	preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);
1019 
1020 	/* Find the kernel address */
1021 	kmdp = preload_search_by_type("elf kernel");
1022 	if (kmdp == NULL)
1023 		kmdp = preload_search_by_type("elf64 kernel");
1024 
1025 	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
1026 	init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);
1027 	link_elf_ireloc(kmdp);
1028 
1029 #ifdef FDT
1030 	try_load_dtb(kmdp);
1031 #endif
1032 
1033 	efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);
1034 
1035 	/* Find the address to start allocating from */
1036 	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
1037 
1038 	/* Load the physical memory ranges */
1039 	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
1040 	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
1041 	if (efihdr != NULL)
1042 		add_efi_map_entries(efihdr);
1043 #ifdef FDT
1044 	else {
1045 		/* Grab physical memory region information from the device tree. */
1046 		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
1047 		    NULL) != 0)
1048 			panic("Cannot get physical memory regions");
1049 		arm_physmem_hardware_regions(mem_regions, mem_regions_sz);
1050 	}
1051 	if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0)
1052 		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
1053 		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
1054 #endif
1055 
1056 	/* Exclude the EFI framebuffer from our view of physical memory. */
1057 	efifb = (struct efi_fb *)preload_search_info(kmdp,
1058 	    MODINFO_METADATA | MODINFOMD_EFI_FB);
1059 	if (efifb != NULL)
1060 		arm_physmem_exclude_region(efifb->fb_addr, efifb->fb_size,
1061 		    EXFLAG_NOALLOC);
1062 
1063 	/* Set the pcpu data, this is needed by pmap_bootstrap */
1064 	pcpup = &__pcpu[0];
1065 	pcpu_init(pcpup, 0, sizeof(struct pcpu));
1066 
1067 	/*
1068 	 * Set the pcpu pointer with a backup in tpidr_el1 to be
1069 	 * loaded when entering the kernel from userland.
1070 	 */
1071 	__asm __volatile(
1072 	    "mov x18, %0 \n"
1073 	    "msr tpidr_el1, %0" :: "r"(pcpup));
1074 
1075 	PCPU_SET(curthread, &thread0);
1076 
1077 	/* Do basic tuning, hz etc */
1078 	init_param1();
1079 
1080 	cache_setup();
1081 	pan_setup();
1082 
1083 	/* Bootstrap enough of pmap to enter the kernel proper */
1084 	pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
1085 	    KERNBASE - abp->kern_delta, lastaddr - KERNBASE);
1086 	/* Exclude entries needed in the DMAP region, but not in phys_avail */
1087 	if (efihdr != NULL)
1088 		exclude_efi_map_entries(efihdr);
1089 	arm_physmem_init_kernel_globals();
1090 
1091 	devmap_bootstrap(0, NULL);
1092 
1093 	valid = bus_probe();
1094 
1095 	cninit();
1096 
1097 	if (!valid)
1098 		panic("Invalid bus configuration: %s",
1099 		    kern_getenv("kern.cfg.order"));
1100 
1101 	init_proc0(abp->kern_stack);
1102 	msgbufinit(msgbufp, msgbufsize);
1103 	mutex_init();
1104 	init_param2(physmem);
1105 
1106 	dbg_init();
1107 	kdb_init();
1108 	pan_enable();
1109 
1110 	env = kern_getenv("kernelname");
1111 	if (env != NULL)
1112 		strlcpy(kernelname, env, sizeof(kernelname));
1113 
1114 	if (boothowto & RB_VERBOSE) {
1115 		if (efihdr != NULL)
			print_efi_map_entries(efihdr);
1116 		arm_physmem_print_tables();
1117 	}
1118 
1119 	early_boot = 0;
1120 }
1121 
1122 void
1123 dbg_init(void)
1124 {
1125 
1126 	/* Clear OS lock */
1127 	WRITE_SPECIALREG(oslar_el1, 0);
1128 
1129 	/* This permits DDB to use debug registers for watchpoints. */
1130 	dbg_monitor_init();
1131 
1132 	/* TODO: Eventually will need to initialize debug registers here. */
1133 }
1134 
1135 #ifdef DDB
1136 #include <ddb/ddb.h>
1137 
1138 DB_SHOW_COMMAND(specialregs, db_show_spregs)
1139 {
1140 #define	PRINT_REG(reg)	\
1141     db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))
1142 
1143 	PRINT_REG(actlr_el1);
1144 	PRINT_REG(afsr0_el1);
1145 	PRINT_REG(afsr1_el1);
1146 	PRINT_REG(aidr_el1);
1147 	PRINT_REG(amair_el1);
1148 	PRINT_REG(ccsidr_el1);
1149 	PRINT_REG(clidr_el1);
1150 	PRINT_REG(contextidr_el1);
1151 	PRINT_REG(cpacr_el1);
1152 	PRINT_REG(csselr_el1);
1153 	PRINT_REG(ctr_el0);
1154 	PRINT_REG(currentel);
1155 	PRINT_REG(daif);
1156 	PRINT_REG(dczid_el0);
1157 	PRINT_REG(elr_el1);
1158 	PRINT_REG(esr_el1);
1159 	PRINT_REG(far_el1);
1160 #if 0
1161 	/* ARM64TODO: Enable VFP before reading floating-point registers */
1162 	PRINT_REG(fpcr);
1163 	PRINT_REG(fpsr);
1164 #endif
1165 	PRINT_REG(id_aa64afr0_el1);
1166 	PRINT_REG(id_aa64afr1_el1);
1167 	PRINT_REG(id_aa64dfr0_el1);
1168 	PRINT_REG(id_aa64dfr1_el1);
1169 	PRINT_REG(id_aa64isar0_el1);
1170 	PRINT_REG(id_aa64isar1_el1);
1171 	PRINT_REG(id_aa64pfr0_el1);
1172 	PRINT_REG(id_aa64pfr1_el1);
1173 	PRINT_REG(id_afr0_el1);
1174 	PRINT_REG(id_dfr0_el1);
1175 	PRINT_REG(id_isar0_el1);
1176 	PRINT_REG(id_isar1_el1);
1177 	PRINT_REG(id_isar2_el1);
1178 	PRINT_REG(id_isar3_el1);
1179 	PRINT_REG(id_isar4_el1);
1180 	PRINT_REG(id_isar5_el1);
1181 	PRINT_REG(id_mmfr0_el1);
1182 	PRINT_REG(id_mmfr1_el1);
1183 	PRINT_REG(id_mmfr2_el1);
1184 	PRINT_REG(id_mmfr3_el1);
1185 #if 0
1186 	/* Missing from llvm */
1187 	PRINT_REG(id_mmfr4_el1);
1188 #endif
1189 	PRINT_REG(id_pfr0_el1);
1190 	PRINT_REG(id_pfr1_el1);
1191 	PRINT_REG(isr_el1);
1192 	PRINT_REG(mair_el1);
1193 	PRINT_REG(midr_el1);
1194 	PRINT_REG(mpidr_el1);
1195 	PRINT_REG(mvfr0_el1);
1196 	PRINT_REG(mvfr1_el1);
1197 	PRINT_REG(mvfr2_el1);
1198 	PRINT_REG(revidr_el1);
1199 	PRINT_REG(sctlr_el1);
1200 	PRINT_REG(sp_el0);
1201 	PRINT_REG(spsel);
1202 	PRINT_REG(spsr_el1);
1203 	PRINT_REG(tcr_el1);
1204 	PRINT_REG(tpidr_el0);
1205 	PRINT_REG(tpidr_el1);
1206 	PRINT_REG(tpidrro_el0);
1207 	PRINT_REG(ttbr0_el1);
1208 	PRINT_REG(ttbr1_el1);
1209 	PRINT_REG(vbar_el1);
1210 #undef PRINT_REG
1211 }
1212 
1213 DB_SHOW_COMMAND(vtop, db_show_vtop)
1214 {
1215 	uint64_t phys;
1216 
1217 	if (have_addr) {
1218 		phys = arm64_address_translate_s1e1r(addr);
1219 		db_printf("EL1 physical address reg (read):  0x%016lx\n", phys);
1220 		phys = arm64_address_translate_s1e1w(addr);
1221 		db_printf("EL1 physical address reg (write): 0x%016lx\n", phys);
1222 		phys = arm64_address_translate_s1e0r(addr);
1223 		db_printf("EL0 physical address reg (read):  0x%016lx\n", phys);
1224 		phys = arm64_address_translate_s1e0w(addr);
1225 		db_printf("EL0 physical address reg (write): 0x%016lx\n", phys);
1226 	} else
1227 		db_printf("show vtop <virt_addr>\n");
1228 }
1229 #endif
1230